2024-12-08 00:46:21,157 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-12-08 00:46:21,173 main DEBUG Took 0.014126 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-08 00:46:21,174 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-08 00:46:21,174 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-08 00:46:21,175 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-08 00:46:21,177 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:46:21,185 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-08 00:46:21,197 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:21,199 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:46:21,200 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:21,200 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:46:21,201 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:21,201 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:46:21,202 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:21,203 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:46:21,204 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:21,204 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:46:21,205 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:21,206 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:46:21,206 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:21,207 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-08 00:46:21,207 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:21,208 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:46:21,208 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:21,209 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:46:21,209 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:21,210 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:46:21,210 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:21,211 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:46:21,212 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:21,212 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:46:21,213 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:21,213 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-08 00:46:21,215 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:21,217 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-08 00:46:21,219 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-08 00:46:21,220 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-08 00:46:21,222 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-08 00:46:21,222 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-08 00:46:21,233 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-08 00:46:21,237 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-08 00:46:21,239 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-08 00:46:21,239 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-08 00:46:21,240 main DEBUG createAppenders(={Console}) 2024-12-08 00:46:21,241 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-12-08 00:46:21,242 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-12-08 00:46:21,242 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-12-08 00:46:21,243 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-08 00:46:21,243 main DEBUG OutputStream closed 2024-12-08 00:46:21,244 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-08 00:46:21,244 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-08 00:46:21,244 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-12-08 00:46:21,304 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-08 00:46:21,306 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-08 00:46:21,307 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-08 00:46:21,308 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-08 00:46:21,309 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-08 00:46:21,309 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-08 00:46:21,309 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-08 00:46:21,310 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-08 00:46:21,310 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-08 00:46:21,310 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-08 00:46:21,311 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-08 00:46:21,311 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-08 00:46:21,311 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-08 00:46:21,311 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-08 00:46:21,312 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-08 00:46:21,312 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-08 00:46:21,312 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-08 00:46:21,313 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-08 00:46:21,315 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-08 00:46:21,315 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-12-08 00:46:21,315 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-08 00:46:21,316 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-12-08T00:46:21,552 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc 2024-12-08 00:46:21,554 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-08 00:46:21,555 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-08T00:46:21,562 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-12-08T00:46:21,591 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=164, ProcessCount=11, AvailableMemoryMB=18454 2024-12-08T00:46:21,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T00:46:21,606 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/cluster_a09d45df-b862-74bd-d733-bc6cec0b7968, deleteOnExit=true 2024-12-08T00:46:21,606 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-08T00:46:21,607 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/test.cache.data in system properties and HBase conf 2024-12-08T00:46:21,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T00:46:21,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/hadoop.log.dir in system properties and HBase conf 2024-12-08T00:46:21,609 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T00:46:21,609 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T00:46:21,609 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T00:46:21,683 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-08T00:46:21,756 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-08T00:46:21,759 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T00:46:21,760 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T00:46:21,760 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T00:46:21,761 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T00:46:21,761 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T00:46:21,761 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T00:46:21,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T00:46:21,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T00:46:21,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T00:46:21,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/nfs.dump.dir in system properties and HBase conf 2024-12-08T00:46:21,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/java.io.tmpdir in system properties and HBase conf 2024-12-08T00:46:21,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T00:46:21,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T00:46:21,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T00:46:22,152 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T00:46:22,647 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-08T00:46:22,707 INFO [Time-limited test {}] log.Log(170): Logging initialized @2136ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-08T00:46:22,766 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:46:22,819 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:46:22,836 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:46:22,837 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:46:22,838 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T00:46:22,850 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:46:22,852 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f2ab976{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:46:22,853 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46a86f8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:46:23,044 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c77270f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/java.io.tmpdir/jetty-localhost-40407-hadoop-hdfs-3_4_1-tests_jar-_-any-9806926458631541420/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T00:46:23,050 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c5145e6{HTTP/1.1, (http/1.1)}{localhost:40407} 2024-12-08T00:46:23,050 INFO [Time-limited test {}] server.Server(415): Started @2480ms 2024-12-08T00:46:23,077 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T00:46:23,540 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:46:23,546 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:46:23,547 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:46:23,547 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:46:23,548 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T00:46:23,549 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2aa5bb6e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:46:23,549 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bdc1e47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:46:23,643 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@59e63bea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/java.io.tmpdir/jetty-localhost-38185-hadoop-hdfs-3_4_1-tests_jar-_-any-5443109677081170200/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:46:23,644 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@264a9341{HTTP/1.1, (http/1.1)}{localhost:38185} 2024-12-08T00:46:23,644 INFO [Time-limited test {}] server.Server(415): Started @3074ms 2024-12-08T00:46:23,692 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T00:46:23,787 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:46:23,791 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:46:23,792 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:46:23,792 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:46:23,793 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T00:46:23,793 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ae73635{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:46:23,794 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@335a4f9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:46:23,889 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@55d18735{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/java.io.tmpdir/jetty-localhost-44533-hadoop-hdfs-3_4_1-tests_jar-_-any-14658079824298929840/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:46:23,890 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@15370523{HTTP/1.1, (http/1.1)}{localhost:44533} 2024-12-08T00:46:23,890 INFO [Time-limited test {}] server.Server(415): Started @3320ms 2024-12-08T00:46:23,892 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-08T00:46:24,593 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/cluster_a09d45df-b862-74bd-d733-bc6cec0b7968/data/data3/current/BP-1861119566-172.17.0.2-1733618782229/current, will proceed with Du for space computation calculation, 2024-12-08T00:46:24,593 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/cluster_a09d45df-b862-74bd-d733-bc6cec0b7968/data/data1/current/BP-1861119566-172.17.0.2-1733618782229/current, will proceed with Du for space computation calculation, 2024-12-08T00:46:24,593 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/cluster_a09d45df-b862-74bd-d733-bc6cec0b7968/data/data4/current/BP-1861119566-172.17.0.2-1733618782229/current, will proceed with Du for space computation calculation, 2024-12-08T00:46:24,593 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/cluster_a09d45df-b862-74bd-d733-bc6cec0b7968/data/data2/current/BP-1861119566-172.17.0.2-1733618782229/current, will proceed with Du for space computation calculation, 2024-12-08T00:46:24,626 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T00:46:24,626 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T00:46:24,667 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd330259741d6bf33 with lease ID 0xfec8d52bdc1ee92a: Processing first storage report for DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3 from datanode DatanodeRegistration(127.0.0.1:33503, datanodeUuid=f4037960-4842-4e5e-8144-af952f06bc4a, infoPort=35101, infoSecurePort=0, ipcPort=35785, storageInfo=lv=-57;cid=testClusterID;nsid=1721534741;c=1733618782229) 2024-12-08T00:46:24,668 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd330259741d6bf33 with lease ID 0xfec8d52bdc1ee92a: from storage DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3 node DatanodeRegistration(127.0.0.1:33503, datanodeUuid=f4037960-4842-4e5e-8144-af952f06bc4a, infoPort=35101, infoSecurePort=0, ipcPort=35785, storageInfo=lv=-57;cid=testClusterID;nsid=1721534741;c=1733618782229), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T00:46:24,669 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x50da642cf5935b79 with lease ID 0xfec8d52bdc1ee92b: Processing first storage report for DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840 from datanode DatanodeRegistration(127.0.0.1:33661, datanodeUuid=8f21831f-3582-4153-93a7-2deb4a8e217d, infoPort=34291, infoSecurePort=0, ipcPort=43815, storageInfo=lv=-57;cid=testClusterID;nsid=1721534741;c=1733618782229) 2024-12-08T00:46:24,669 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x50da642cf5935b79 with lease ID 0xfec8d52bdc1ee92b: from storage DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840 node DatanodeRegistration(127.0.0.1:33661, datanodeUuid=8f21831f-3582-4153-93a7-2deb4a8e217d, infoPort=34291, infoSecurePort=0, ipcPort=43815, storageInfo=lv=-57;cid=testClusterID;nsid=1721534741;c=1733618782229), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:46:24,669 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd330259741d6bf33 with lease ID 0xfec8d52bdc1ee92a: Processing first storage report for DS-ff9d60b6-b2ab-474c-ac1a-6e9f24df5bf3 from datanode DatanodeRegistration(127.0.0.1:33503, datanodeUuid=f4037960-4842-4e5e-8144-af952f06bc4a, infoPort=35101, infoSecurePort=0, ipcPort=35785, storageInfo=lv=-57;cid=testClusterID;nsid=1721534741;c=1733618782229) 2024-12-08T00:46:24,669 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd330259741d6bf33 with lease ID 0xfec8d52bdc1ee92a: from storage DS-ff9d60b6-b2ab-474c-ac1a-6e9f24df5bf3 node DatanodeRegistration(127.0.0.1:33503, datanodeUuid=f4037960-4842-4e5e-8144-af952f06bc4a, infoPort=35101, infoSecurePort=0, ipcPort=35785, storageInfo=lv=-57;cid=testClusterID;nsid=1721534741;c=1733618782229), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:46:24,670 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x50da642cf5935b79 with lease ID 0xfec8d52bdc1ee92b: Processing first storage report for DS-995d3920-382d-494e-bb47-c26dd0680dc7 from datanode DatanodeRegistration(127.0.0.1:33661, datanodeUuid=8f21831f-3582-4153-93a7-2deb4a8e217d, infoPort=34291, infoSecurePort=0, ipcPort=43815, storageInfo=lv=-57;cid=testClusterID;nsid=1721534741;c=1733618782229) 2024-12-08T00:46:24,670 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x50da642cf5935b79 with lease ID 0xfec8d52bdc1ee92b: from storage DS-995d3920-382d-494e-bb47-c26dd0680dc7 node DatanodeRegistration(127.0.0.1:33661, datanodeUuid=8f21831f-3582-4153-93a7-2deb4a8e217d, infoPort=34291, infoSecurePort=0, ipcPort=43815, storageInfo=lv=-57;cid=testClusterID;nsid=1721534741;c=1733618782229), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T00:46:24,770 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc 2024-12-08T00:46:24,825 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/cluster_a09d45df-b862-74bd-d733-bc6cec0b7968/zookeeper_0, clientPort=49878, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/cluster_a09d45df-b862-74bd-d733-bc6cec0b7968/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/cluster_a09d45df-b862-74bd-d733-bc6cec0b7968/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T00:46:24,833 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49878 2024-12-08T00:46:24,842 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:24,844 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:25,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741825_1001 (size=7) 2024-12-08T00:46:25,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741825_1001 (size=7) 2024-12-08T00:46:25,454 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557 with version=8 2024-12-08T00:46:25,455 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/hbase-staging 2024-12-08T00:46:25,522 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-08T00:46:25,752 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0f983e3e5be1:0 server-side Connection retries=45 2024-12-08T00:46:25,760 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:25,761 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:25,764 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T00:46:25,765 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:25,765 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T00:46:25,869 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T00:46:25,914 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-08T00:46:25,921 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-08T00:46:25,924 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T00:46:25,944 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 21221 (auto-detected) 2024-12-08T00:46:25,945 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-08T00:46:25,961 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40409 2024-12-08T00:46:25,977 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40409 connecting to ZooKeeper ensemble=127.0.0.1:49878 2024-12-08T00:46:26,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:404090x0, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T00:46:26,090 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40409-0x10002f118c80000 connected 2024-12-08T00:46:26,187 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:26,191 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:26,203 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:46:26,207 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557, hbase.cluster.distributed=false 2024-12-08T00:46:26,225 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T00:46:26,229 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40409 
2024-12-08T00:46:26,230 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40409 2024-12-08T00:46:26,230 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40409 2024-12-08T00:46:26,231 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40409 2024-12-08T00:46:26,231 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40409 2024-12-08T00:46:26,321 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0f983e3e5be1:0 server-side Connection retries=45 2024-12-08T00:46:26,322 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:26,323 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:26,323 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T00:46:26,323 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:26,323 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T00:46:26,325 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T00:46:26,327 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T00:46:26,328 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34109 2024-12-08T00:46:26,330 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34109 connecting to ZooKeeper ensemble=127.0.0.1:49878 2024-12-08T00:46:26,331 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:26,335 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:26,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:341090x0, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T00:46:26,361 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34109-0x10002f118c80001 connected 2024-12-08T00:46:26,361 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34109-0x10002f118c80001, quorum=127.0.0.1:49878, baseZNode=/hbase Set watcher on 
znode that does not yet exist, /hbase/running 2024-12-08T00:46:26,365 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T00:46:26,372 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T00:46:26,374 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34109-0x10002f118c80001, quorum=127.0.0.1:49878, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T00:46:26,378 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34109-0x10002f118c80001, quorum=127.0.0.1:49878, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T00:46:26,379 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34109 2024-12-08T00:46:26,380 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34109 2024-12-08T00:46:26,380 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34109 2024-12-08T00:46:26,382 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34109 2024-12-08T00:46:26,382 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34109 2024-12-08T00:46:26,396 DEBUG [M:0;0f983e3e5be1:40409 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0f983e3e5be1:40409 2024-12-08T00:46:26,397 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0f983e3e5be1,40409,1733618785607 2024-12-08T00:46:26,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34109-0x10002f118c80001, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:46:26,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:46:26,428 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0f983e3e5be1,40409,1733618785607 2024-12-08T00:46:26,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34109-0x10002f118c80001, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T00:46:26,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:26,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34109-0x10002f118c80001, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:26,461 DEBUG 
[master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T00:46:26,462 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0f983e3e5be1,40409,1733618785607 from backup master directory 2024-12-08T00:46:26,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0f983e3e5be1,40409,1733618785607 2024-12-08T00:46:26,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34109-0x10002f118c80001, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:46:26,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:46:26,475 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T00:46:26,476 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0f983e3e5be1,40409,1733618785607 2024-12-08T00:46:26,478 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-08T00:46:26,480 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-08T00:46:26,528 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/hbase.id] with ID: ef0f5365-e09c-46a1-b9f8-3996ac576fc3 2024-12-08T00:46:26,528 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/.tmp/hbase.id 2024-12-08T00:46:26,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741826_1002 (size=42) 2024-12-08T00:46:26,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741826_1002 (size=42) 2024-12-08T00:46:26,540 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/.tmp/hbase.id]:[hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/hbase.id] 2024-12-08T00:46:26,582 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:26,586 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-12-08T00:46:26,602 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 14ms. 2024-12-08T00:46:26,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:26,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34109-0x10002f118c80001, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:26,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741827_1003 (size=196) 2024-12-08T00:46:26,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741827_1003 (size=196) 2024-12-08T00:46:26,674 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T00:46:26,675 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T00:46:26,680 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:46:26,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741828_1004 (size=1189) 2024-12-08T00:46:26,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741828_1004 (size=1189) 2024-12-08T00:46:26,722 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/data/master/store 2024-12-08T00:46:26,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741829_1005 (size=34) 2024-12-08T00:46:26,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741829_1005 (size=34) 2024-12-08T00:46:26,746 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-08T00:46:26,750 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:46:26,751 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T00:46:26,751 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:46:26,752 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:46:26,754 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T00:46:26,754 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:46:26,754 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T00:46:26,756 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733618786751Disabling compacts and flushes for region at 1733618786751Disabling writes for close at 1733618786754 (+3 ms)Writing region close event to WAL at 1733618786754Closed at 1733618786754 2024-12-08T00:46:26,758 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/data/master/store/.initializing 2024-12-08T00:46:26,758 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/WALs/0f983e3e5be1,40409,1733618785607 2024-12-08T00:46:26,779 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C40409%2C1733618785607, suffix=, logDir=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/WALs/0f983e3e5be1,40409,1733618785607, archiveDir=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/oldWALs, maxLogs=10 2024-12-08T00:46:26,787 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C40409%2C1733618785607.1733618786783 2024-12-08T00:46:26,804 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/WALs/0f983e3e5be1,40409,1733618785607/0f983e3e5be1%2C40409%2C1733618785607.1733618786783 2024-12-08T00:46:26,811 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35101:35101),(127.0.0.1/127.0.0.1:34291:34291)] 2024-12-08T00:46:26,812 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:46:26,813 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:46:26,817 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:26,818 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:26,850 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:26,870 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T00:46:26,873 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:26,875 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:46:26,876 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:26,878 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T00:46:26,878 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:26,879 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:46:26,880 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:26,882 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T00:46:26,882 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:26,883 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:46:26,883 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:26,885 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T00:46:26,886 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:26,886 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:46:26,887 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:26,890 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:26,891 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:26,895 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:26,896 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:26,899 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T00:46:26,903 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:26,907 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:46:26,908 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=741136, jitterRate=-0.057597026228904724}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T00:46:26,912 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733618786830Initializing all the Stores at 1733618786832 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618786832Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618786833 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618786833Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618786833Cleaning up temporary data from old regions at 1733618786896 (+63 ms)Region opened successfully at 1733618786912 (+16 ms) 2024-12-08T00:46:26,913 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T00:46:26,943 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b582434, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0f983e3e5be1/172.17.0.2:0 2024-12-08T00:46:26,968 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T00:46:26,977 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T00:46:26,977 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T00:46:26,979 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T00:46:26,981 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-08T00:46:26,985 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-08T00:46:26,985 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T00:46:27,011 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T00:46:27,021 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T00:46:27,068 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T00:46:27,073 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T00:46:27,076 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T00:46:27,084 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T00:46:27,086 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T00:46:27,090 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T00:46:27,099 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T00:46:27,101 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T00:46:27,110 DEBUG 
[master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T00:46:27,130 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T00:46:27,141 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T00:46:27,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34109-0x10002f118c80001, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T00:46:27,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T00:46:27,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34109-0x10002f118c80001, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:27,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:27,155 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0f983e3e5be1,40409,1733618785607, sessionid=0x10002f118c80000, setting cluster-up flag (Was=false) 2024-12-08T00:46:27,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34109-0x10002f118c80001, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:27,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:27,210 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T00:46:27,214 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0f983e3e5be1,40409,1733618785607 2024-12-08T00:46:27,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34109-0x10002f118c80001, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:27,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:27,260 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T00:46:27,262 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0f983e3e5be1,40409,1733618785607 2024-12-08T00:46:27,269 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T00:46:27,286 INFO [RS:0;0f983e3e5be1:34109 {}] regionserver.HRegionServer(746): ClusterId : ef0f5365-e09c-46a1-b9f8-3996ac576fc3 2024-12-08T00:46:27,289 DEBUG [RS:0;0f983e3e5be1:34109 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T00:46:27,302 DEBUG [RS:0;0f983e3e5be1:34109 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T00:46:27,302 DEBUG [RS:0;0f983e3e5be1:34109 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T00:46:27,311 DEBUG [RS:0;0f983e3e5be1:34109 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T00:46:27,312 DEBUG [RS:0;0f983e3e5be1:34109 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b8ba653, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0f983e3e5be1/172.17.0.2:0 2024-12-08T00:46:27,324 DEBUG [RS:0;0f983e3e5be1:34109 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0f983e3e5be1:34109 2024-12-08T00:46:27,326 INFO [RS:0;0f983e3e5be1:34109 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T00:46:27,326 INFO [RS:0;0f983e3e5be1:34109 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T00:46:27,326 DEBUG [RS:0;0f983e3e5be1:34109 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-08T00:46:27,328 INFO [RS:0;0f983e3e5be1:34109 {}] regionserver.HRegionServer(2659): reportForDuty to master=0f983e3e5be1,40409,1733618785607 with port=34109, startcode=1733618786289 2024-12-08T00:46:27,338 DEBUG [RS:0;0f983e3e5be1:34109 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T00:46:27,338 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T00:46:27,346 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T00:46:27,352 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-08T00:46:27,358 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0f983e3e5be1,40409,1733618785607 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T00:46:27,366 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:46:27,367 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:46:27,367 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:46:27,367 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:46:27,367 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0f983e3e5be1:0, corePoolSize=10, maxPoolSize=10 2024-12-08T00:46:27,367 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:27,368 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=2, maxPoolSize=2 2024-12-08T00:46:27,368 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:27,372 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733618817372 2024-12-08T00:46:27,372 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:46:27,373 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-08T00:46:27,374 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T00:46:27,375 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T00:46:27,378 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T00:46:27,378 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T00:46:27,379 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T00:46:27,379 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T00:46:27,379 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:27,379 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T00:46:27,380 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-08T00:46:27,383 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T00:46:27,384 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T00:46:27,385 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T00:46:27,389 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T00:46:27,389 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T00:46:27,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741831_1007 (size=1321) 2024-12-08T00:46:27,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741831_1007 (size=1321) 2024-12-08T00:46:27,395 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733618787390,5,FailOnTimeoutGroup] 2024-12-08T00:46:27,396 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733618787396,5,FailOnTimeoutGroup] 2024-12-08T00:46:27,397 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:27,397 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
2024-12-08T00:46:27,397 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T00:46:27,398 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42051, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T00:46:27,398 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557 2024-12-08T00:46:27,398 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:27,399 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-08T00:46:27,404 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40409 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0f983e3e5be1,34109,1733618786289 2024-12-08T00:46:27,406 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40409 {}] master.ServerManager(517): Registering regionserver=0f983e3e5be1,34109,1733618786289 2024-12-08T00:46:27,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741832_1008 (size=32) 2024-12-08T00:46:27,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741832_1008 (size=32) 2024-12-08T00:46:27,412 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:46:27,415 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T00:46:27,418 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T00:46:27,418 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:27,419 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:46:27,419 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T00:46:27,421 DEBUG [RS:0;0f983e3e5be1:34109 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557 2024-12-08T00:46:27,421 DEBUG [RS:0;0f983e3e5be1:34109 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44103 2024-12-08T00:46:27,421 DEBUG [RS:0;0f983e3e5be1:34109 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T00:46:27,422 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T00:46:27,422 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:27,423 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:46:27,423 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T00:46:27,426 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T00:46:27,426 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:27,427 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:46:27,428 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T00:46:27,430 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T00:46:27,430 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:27,431 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:46:27,431 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T00:46:27,433 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/hbase/meta/1588230740 2024-12-08T00:46:27,434 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/hbase/meta/1588230740 2024-12-08T00:46:27,436 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T00:46:27,437 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T00:46:27,438 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T00:46:27,440 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T00:46:27,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T00:46:27,444 DEBUG [RS:0;0f983e3e5be1:34109 {}] zookeeper.ZKUtil(111): regionserver:34109-0x10002f118c80001, quorum=127.0.0.1:49878, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0f983e3e5be1,34109,1733618786289 2024-12-08T00:46:27,444 WARN [RS:0;0f983e3e5be1:34109 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-08T00:46:27,444 INFO [RS:0;0f983e3e5be1:34109 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:46:27,444 DEBUG [RS:0;0f983e3e5be1:34109 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289 2024-12-08T00:46:27,444 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:46:27,446 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=873278, jitterRate=0.11043095588684082}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T00:46:27,446 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0f983e3e5be1,34109,1733618786289] 2024-12-08T00:46:27,449 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733618787413Initializing all the Stores at 1733618787415 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618787415Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618787415Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618787415Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618787415Cleaning up temporary data from old regions at 1733618787437 (+22 ms)Region opened successfully at 1733618787449 (+12 ms) 2024-12-08T00:46:27,449 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T00:46:27,449 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T00:46:27,449 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T00:46:27,449 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T00:46:27,450 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T00:46:27,451 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T00:46:27,451 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733618787449Disabling compacts and flushes for region at 1733618787449Disabling writes for close at 1733618787450 (+1 ms)Writing region close event to WAL at 1733618787450Closed at 1733618787451 (+1 ms) 2024-12-08T00:46:27,455 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:46:27,455 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T00:46:27,463 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T00:46:27,466 INFO [RS:0;0f983e3e5be1:34109 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T00:46:27,472 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T00:46:27,475 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T00:46:27,478 INFO [RS:0;0f983e3e5be1:34109 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T00:46:27,483 INFO [RS:0;0f983e3e5be1:34109 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T00:46:27,483 INFO [RS:0;0f983e3e5be1:34109 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:27,484 INFO [RS:0;0f983e3e5be1:34109 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T00:46:27,489 INFO [RS:0;0f983e3e5be1:34109 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T00:46:27,490 INFO [RS:0;0f983e3e5be1:34109 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-08T00:46:27,490 DEBUG [RS:0;0f983e3e5be1:34109 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:27,490 DEBUG [RS:0;0f983e3e5be1:34109 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:27,491 DEBUG [RS:0;0f983e3e5be1:34109 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:27,491 DEBUG [RS:0;0f983e3e5be1:34109 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:27,491 DEBUG [RS:0;0f983e3e5be1:34109 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:27,491 DEBUG [RS:0;0f983e3e5be1:34109 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0f983e3e5be1:0, corePoolSize=2, maxPoolSize=2 2024-12-08T00:46:27,491 DEBUG [RS:0;0f983e3e5be1:34109 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:27,491 DEBUG [RS:0;0f983e3e5be1:34109 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:27,491 DEBUG [RS:0;0f983e3e5be1:34109 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:27,491 DEBUG [RS:0;0f983e3e5be1:34109 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:27,492 DEBUG [RS:0;0f983e3e5be1:34109 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:27,492 DEBUG [RS:0;0f983e3e5be1:34109 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:27,492 DEBUG [RS:0;0f983e3e5be1:34109 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:46:27,492 DEBUG [RS:0;0f983e3e5be1:34109 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:46:27,493 INFO [RS:0;0f983e3e5be1:34109 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:27,493 INFO [RS:0;0f983e3e5be1:34109 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:27,493 INFO [RS:0;0f983e3e5be1:34109 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:27,493 INFO [RS:0;0f983e3e5be1:34109 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-08T00:46:27,493 INFO [RS:0;0f983e3e5be1:34109 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:27,493 INFO [RS:0;0f983e3e5be1:34109 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,34109,1733618786289-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T00:46:27,508 INFO [RS:0;0f983e3e5be1:34109 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T00:46:27,510 INFO [RS:0;0f983e3e5be1:34109 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,34109,1733618786289-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:27,510 INFO [RS:0;0f983e3e5be1:34109 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:27,510 INFO [RS:0;0f983e3e5be1:34109 {}] regionserver.Replication(171): 0f983e3e5be1,34109,1733618786289 started 2024-12-08T00:46:27,525 INFO [RS:0;0f983e3e5be1:34109 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:27,526 INFO [RS:0;0f983e3e5be1:34109 {}] regionserver.HRegionServer(1482): Serving as 0f983e3e5be1,34109,1733618786289, RpcServer on 0f983e3e5be1/172.17.0.2:34109, sessionid=0x10002f118c80001 2024-12-08T00:46:27,526 DEBUG [RS:0;0f983e3e5be1:34109 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T00:46:27,526 DEBUG [RS:0;0f983e3e5be1:34109 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0f983e3e5be1,34109,1733618786289 2024-12-08T00:46:27,527 DEBUG [RS:0;0f983e3e5be1:34109 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,34109,1733618786289' 2024-12-08T00:46:27,527 DEBUG [RS:0;0f983e3e5be1:34109 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T00:46:27,528 DEBUG [RS:0;0f983e3e5be1:34109 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T00:46:27,528 DEBUG [RS:0;0f983e3e5be1:34109 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T00:46:27,528 DEBUG [RS:0;0f983e3e5be1:34109 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T00:46:27,529 DEBUG [RS:0;0f983e3e5be1:34109 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0f983e3e5be1,34109,1733618786289 2024-12-08T00:46:27,529 DEBUG [RS:0;0f983e3e5be1:34109 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,34109,1733618786289' 2024-12-08T00:46:27,529 DEBUG [RS:0;0f983e3e5be1:34109 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T00:46:27,529 DEBUG [RS:0;0f983e3e5be1:34109 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T00:46:27,530 DEBUG [RS:0;0f983e3e5be1:34109 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T00:46:27,530 INFO [RS:0;0f983e3e5be1:34109 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T00:46:27,530 INFO [RS:0;0f983e3e5be1:34109 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-12-08T00:46:27,626 WARN [0f983e3e5be1:40409 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-08T00:46:27,639 INFO [RS:0;0f983e3e5be1:34109 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C34109%2C1733618786289, suffix=, logDir=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289, archiveDir=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/oldWALs, maxLogs=32 2024-12-08T00:46:27,644 INFO [RS:0;0f983e3e5be1:34109 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C34109%2C1733618786289.1733618787644 2024-12-08T00:46:27,652 INFO [RS:0;0f983e3e5be1:34109 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618787644 2024-12-08T00:46:27,654 DEBUG [RS:0;0f983e3e5be1:34109 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34291:34291),(127.0.0.1/127.0.0.1:35101:35101)] 2024-12-08T00:46:27,882 DEBUG [0f983e3e5be1:40409 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-08T00:46:27,895 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0f983e3e5be1,34109,1733618786289 2024-12-08T00:46:27,900 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0f983e3e5be1,34109,1733618786289, state=OPENING 2024-12-08T00:46:27,943 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T00:46:27,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:27,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34109-0x10002f118c80001, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:27,953 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:46:27,953 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:46:27,955 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T00:46:27,958 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0f983e3e5be1,34109,1733618786289}] 2024-12-08T00:46:28,140 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T00:46:28,143 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60575, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T00:46:28,152 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-08T00:46:28,153 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:46:28,156 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C34109%2C1733618786289.meta, suffix=.meta, logDir=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289, archiveDir=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/oldWALs, maxLogs=32 2024-12-08T00:46:28,158 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C34109%2C1733618786289.meta.1733618788157.meta 2024-12-08T00:46:28,166 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.meta.1733618788157.meta 2024-12-08T00:46:28,169 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34291:34291),(127.0.0.1/127.0.0.1:35101:35101)] 2024-12-08T00:46:28,171 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:46:28,173 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T00:46:28,175 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T00:46:28,179 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-08T00:46:28,183 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T00:46:28,184 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:46:28,184 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-08T00:46:28,184 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-08T00:46:28,187 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T00:46:28,189 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T00:46:28,189 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:28,190 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:46:28,190 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T00:46:28,191 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T00:46:28,192 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:28,193 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:46:28,193 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T00:46:28,194 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T00:46:28,194 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:28,195 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:46:28,195 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T00:46:28,196 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T00:46:28,196 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:28,197 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-08T00:46:28,197 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T00:46:28,199 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/hbase/meta/1588230740 2024-12-08T00:46:28,201 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/hbase/meta/1588230740 2024-12-08T00:46:28,204 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T00:46:28,204 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T00:46:28,205 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T00:46:28,208 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T00:46:28,209 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=878871, jitterRate=0.1175433099269867}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T00:46:28,209 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-08T00:46:28,210 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733618788184Writing region info on filesystem at 1733618788185 (+1 ms)Initializing all the Stores at 1733618788186 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618788186Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618788187 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618788187Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618788187Cleaning up temporary data from old regions at 1733618788204 (+17 ms)Running coprocessor post-open hooks at 1733618788209 (+5 ms)Region opened successfully at 1733618788210 (+1 ms) 2024-12-08T00:46:28,216 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733618788132 2024-12-08T00:46:28,226 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T00:46:28,227 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-08T00:46:28,228 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0f983e3e5be1,34109,1733618786289 2024-12-08T00:46:28,230 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0f983e3e5be1,34109,1733618786289, state=OPEN 2024-12-08T00:46:28,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34109-0x10002f118c80001, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T00:46:28,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T00:46:28,284 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:46:28,284 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:46:28,284 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0f983e3e5be1,34109,1733618786289 2024-12-08T00:46:28,290 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T00:46:28,290 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0f983e3e5be1,34109,1733618786289 in 326 msec 2024-12-08T00:46:28,297 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T00:46:28,298 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 829 msec 2024-12-08T00:46:28,299 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:46:28,299 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-08T00:46:28,317 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T00:46:28,318 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0f983e3e5be1,34109,1733618786289, seqNum=-1] 2024-12-08T00:46:28,334 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:46:28,335 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51951, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:46:28,354 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0550 sec 2024-12-08T00:46:28,354 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733618788354, completionTime=-1 2024-12-08T00:46:28,357 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-08T00:46:28,357 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-08T00:46:28,381 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-08T00:46:28,381 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733618848381 2024-12-08T00:46:28,381 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733618908381 2024-12-08T00:46:28,381 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 24 msec 2024-12-08T00:46:28,384 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,40409,1733618785607-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:28,384 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,40409,1733618785607-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:28,384 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,40409,1733618785607-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:28,386 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0f983e3e5be1:40409, period=300000, unit=MILLISECONDS is enabled. 
2024-12-08T00:46:28,386 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:28,386 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:28,392 DEBUG [master/0f983e3e5be1:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T00:46:28,415 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.939sec 2024-12-08T00:46:28,416 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T00:46:28,418 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T00:46:28,419 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T00:46:28,419 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-08T00:46:28,419 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T00:46:28,420 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,40409,1733618785607-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T00:46:28,420 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,40409,1733618785607-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T00:46:28,428 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T00:46:28,429 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T00:46:28,430 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,40409,1733618785607-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-08T00:46:28,495 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a88365d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:46:28,497 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-08T00:46:28,497 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-08T00:46:28,500 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0f983e3e5be1,40409,-1 for getting cluster id 2024-12-08T00:46:28,502 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T00:46:28,510 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ef0f5365-e09c-46a1-b9f8-3996ac576fc3' 2024-12-08T00:46:28,512 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T00:46:28,513 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ef0f5365-e09c-46a1-b9f8-3996ac576fc3" 2024-12-08T00:46:28,513 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46a29423, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:46:28,513 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0f983e3e5be1,40409,-1] 2024-12-08T00:46:28,516 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T00:46:28,517 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:46:28,519 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57156, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T00:46:28,521 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1637bc3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:46:28,521 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T00:46:28,527 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0f983e3e5be1,34109,1733618786289, seqNum=-1] 2024-12-08T00:46:28,528 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:46:28,530 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37894, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:46:28,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=0f983e3e5be1,40409,1733618785607 2024-12-08T00:46:28,564 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:28,572 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-08T00:46:28,576 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-08T00:46:28,582 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 0f983e3e5be1,40409,1733618785607 2024-12-08T00:46:28,588 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@50af8e1e 2024-12-08T00:46:28,589 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T00:46:28,591 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57158, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T00:46:28,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40409 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-08T00:46:28,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40409 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-08T00:46:28,596 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40409 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T00:46:28,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40409 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-12-08T00:46:28,606 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T00:46:28,608 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40409 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-12-08T00:46:28,608 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:28,610 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T00:46:28,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40409 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T00:46:28,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741835_1011 (size=389) 2024-12-08T00:46:28,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741835_1011 (size=389) 2024-12-08T00:46:28,653 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a700cdd56b87791a9367376798e8d385, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557 2024-12-08T00:46:28,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741836_1012 (size=72) 2024-12-08T00:46:28,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741836_1012 (size=72) 2024-12-08T00:46:28,664 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:46:28,664 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing a700cdd56b87791a9367376798e8d385, disabling compactions & flushes 2024-12-08T00:46:28,664 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385. 2024-12-08T00:46:28,664 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385. 2024-12-08T00:46:28,664 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385. after waiting 0 ms 2024-12-08T00:46:28,664 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385. 2024-12-08T00:46:28,664 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385. 2024-12-08T00:46:28,664 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for a700cdd56b87791a9367376798e8d385: Waiting for close lock at 1733618788664Disabling compacts and flushes for region at 1733618788664Disabling writes for close at 1733618788664Writing region close event to WAL at 1733618788664Closed at 1733618788664 2024-12-08T00:46:28,666 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T00:46:28,670 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1733618788666"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733618788666"}]},"ts":"1733618788666"} 2024-12-08T00:46:28,675 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-08T00:46:28,676 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T00:46:28,679 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733618788676"}]},"ts":"1733618788676"} 2024-12-08T00:46:28,683 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-12-08T00:46:28,684 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=a700cdd56b87791a9367376798e8d385, ASSIGN}] 2024-12-08T00:46:28,686 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=a700cdd56b87791a9367376798e8d385, ASSIGN 2024-12-08T00:46:28,688 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=a700cdd56b87791a9367376798e8d385, ASSIGN; state=OFFLINE, location=0f983e3e5be1,34109,1733618786289; forceNewPlan=false, retain=false 2024-12-08T00:46:28,840 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a700cdd56b87791a9367376798e8d385, regionState=OPENING, regionLocation=0f983e3e5be1,34109,1733618786289 2024-12-08T00:46:28,848 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=a700cdd56b87791a9367376798e8d385, ASSIGN because future has completed 2024-12-08T00:46:28,850 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a700cdd56b87791a9367376798e8d385, server=0f983e3e5be1,34109,1733618786289}] 2024-12-08T00:46:29,017 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385. 
2024-12-08T00:46:29,018 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => a700cdd56b87791a9367376798e8d385, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385.', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:46:29,018 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling a700cdd56b87791a9367376798e8d385 2024-12-08T00:46:29,018 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:46:29,019 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for a700cdd56b87791a9367376798e8d385 2024-12-08T00:46:29,019 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for a700cdd56b87791a9367376798e8d385 2024-12-08T00:46:29,021 INFO [StoreOpener-a700cdd56b87791a9367376798e8d385-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region a700cdd56b87791a9367376798e8d385 2024-12-08T00:46:29,023 INFO [StoreOpener-a700cdd56b87791a9367376798e8d385-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a700cdd56b87791a9367376798e8d385 columnFamilyName info 2024-12-08T00:46:29,023 DEBUG [StoreOpener-a700cdd56b87791a9367376798e8d385-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:29,025 INFO [StoreOpener-a700cdd56b87791a9367376798e8d385-1 {}] regionserver.HStore(327): Store=a700cdd56b87791a9367376798e8d385/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:46:29,025 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for a700cdd56b87791a9367376798e8d385 2024-12-08T00:46:29,026 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385 2024-12-08T00:46:29,027 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385 2024-12-08T00:46:29,027 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for a700cdd56b87791a9367376798e8d385 2024-12-08T00:46:29,027 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for a700cdd56b87791a9367376798e8d385 2024-12-08T00:46:29,030 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for a700cdd56b87791a9367376798e8d385 2024-12-08T00:46:29,033 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:46:29,034 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened a700cdd56b87791a9367376798e8d385; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=734040, jitterRate=-0.06662002205848694}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T00:46:29,035 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a700cdd56b87791a9367376798e8d385 2024-12-08T00:46:29,036 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for a700cdd56b87791a9367376798e8d385: Running coprocessor pre-open hook at 1733618789019Writing region info on filesystem at 1733618789019Initializing all the Stores at 1733618789020 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618789020Cleaning up temporary data from old regions at 1733618789027 (+7 ms)Running coprocessor post-open hooks at 1733618789035 (+8 ms)Region opened successfully at 1733618789036 (+1 ms) 2024-12-08T00:46:29,037 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385., pid=6, masterSystemTime=1733618789006 2024-12-08T00:46:29,041 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385. 2024-12-08T00:46:29,041 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385. 2024-12-08T00:46:29,042 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a700cdd56b87791a9367376798e8d385, regionState=OPEN, openSeqNum=2, regionLocation=0f983e3e5be1,34109,1733618786289 2024-12-08T00:46:29,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a700cdd56b87791a9367376798e8d385, server=0f983e3e5be1,34109,1733618786289 because future has completed 2024-12-08T00:46:29,051 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-08T00:46:29,051 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure a700cdd56b87791a9367376798e8d385, server=0f983e3e5be1,34109,1733618786289 in 197 msec 2024-12-08T00:46:29,054 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-08T00:46:29,054 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=a700cdd56b87791a9367376798e8d385, ASSIGN in 367 msec 2024-12-08T00:46:29,056 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T00:46:29,056 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733618789056"}]},"ts":"1733618789056"} 2024-12-08T00:46:29,059 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-12-08T00:46:29,060 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T00:46:29,063 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 461 msec 2024-12-08T00:46:33,694 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-08T00:46:33,737 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-08T00:46:33,738 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-12-08T00:46:35,912 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-08T00:46:35,913 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-08T00:46:35,917 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-08T00:46:35,917 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-08T00:46:35,919 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T00:46:35,919 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-08T00:46:35,920 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-08T00:46:35,920 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-08T00:46:38,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40409 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T00:46:38,689 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-12-08T00:46:38,694 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-12-08T00:46:38,699 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-12-08T00:46:38,699 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385. 
2024-12-08T00:46:38,700 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C34109%2C1733618786289.1733618798700 2024-12-08T00:46:38,709 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:38,710 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:38,710 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:38,710 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:38,710 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:38,711 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618787644 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618798700 2024-12-08T00:46:38,712 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34291:34291),(127.0.0.1/127.0.0.1:35101:35101)] 2024-12-08T00:46:38,712 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618787644 is not closed yet, will try archiving it next time 2024-12-08T00:46:38,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741833_1009 (size=451) 2024-12-08T00:46:38,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741833_1009 (size=451) 2024-12-08T00:46:38,718 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618787644 to hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/oldWALs/0f983e3e5be1%2C34109%2C1733618786289.1733618787644 2024-12-08T00:46:38,721 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385., hostname=0f983e3e5be1,34109,1733618786289, seqNum=2] 2024-12-08T00:46:50,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34109 {}] regionserver.HRegion(8855): Flush requested on a700cdd56b87791a9367376798e8d385 2024-12-08T00:46:50,777 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a700cdd56b87791a9367376798e8d385 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T00:46:50,825 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/.tmp/info/6a55288ab7f04389a20f23675074ae97 is 1080, key is row0001/info:/1733618798724/Put/seqid=0 2024-12-08T00:46:50,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741838_1014 (size=12509) 2024-12-08T00:46:50,835 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741838_1014 (size=12509) 2024-12-08T00:46:50,836 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/.tmp/info/6a55288ab7f04389a20f23675074ae97 2024-12-08T00:46:50,877 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/.tmp/info/6a55288ab7f04389a20f23675074ae97 as hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/6a55288ab7f04389a20f23675074ae97 2024-12-08T00:46:50,887 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/6a55288ab7f04389a20f23675074ae97, entries=7, sequenceid=11, filesize=12.2 K 2024-12-08T00:46:50,895 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for a700cdd56b87791a9367376798e8d385 in 119ms, sequenceid=11, compaction requested=false 2024-12-08T00:46:50,896 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a700cdd56b87791a9367376798e8d385: 2024-12-08T00:46:54,768 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
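[Editor's note] Each memstore flush above follows the same shape: the ~7.36 KB of in-memory edits is written to a file under the region's .tmp directory, then committed (renamed) into the store's info directory as an immutable file ("Committing ... .tmp/info/... as ... info/..."), after which the memstore is empty again. Below is a simplified write-then-rename sketch using plain java.nio that models only this flow; it is not HBase's HFile format or HRegionFileSystem API.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.*;
    import java.util.Map;
    import java.util.concurrent.ConcurrentSkipListMap;

    // Illustrative flush: sorted in-memory edits are written to a temp file and
    // atomically moved into the store directory, mirroring the .tmp -> info commit above.
    public class MiniMemStore {
        private final ConcurrentSkipListMap<String, String> edits = new ConcurrentSkipListMap<>();

        public void put(String rowKey, String value) {
            edits.put(rowKey, value);
        }

        /** Flush all buffered edits to storeDir and return the committed file. */
        public Path flush(Path storeDir, long sequenceId) throws IOException {
            Path tmpDir = storeDir.resolve(".tmp");
            Files.createDirectories(tmpDir);
            Path tmpFile = Files.createTempFile(tmpDir, "flush-", ".data");
            StringBuilder sb = new StringBuilder();
            for (Map.Entry<String, String> e : edits.entrySet()) {
                sb.append(e.getKey()).append('=').append(e.getValue()).append('\n');
            }
            Files.write(tmpFile, sb.toString().getBytes(StandardCharsets.UTF_8));
            // Commit: move the finished file into place, then clear the memstore.
            Path committed = storeDir.resolve("seqid-" + sequenceId + ".data");
            Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
            edits.clear();
            return committed;
        }
    }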
2024-12-08T00:46:58,792 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C34109%2C1733618786289.1733618818792 2024-12-08T00:46:59,010 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 213 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK], DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK]] 2024-12-08T00:46:59,011 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:59,011 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:59,011 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:59,011 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:59,011 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:59,012 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618798700 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618818792 2024-12-08T00:46:59,013 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34291:34291),(127.0.0.1/127.0.0.1:35101:35101)] 2024-12-08T00:46:59,013 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618798700 is not closed yet, will try archiving it next time 2024-12-08T00:46:59,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741837_1013 (size=12399) 2024-12-08T00:46:59,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741837_1013 (size=12399) 2024-12-08T00:46:59,217 INFO [FSHLog-0-hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557-prefix:0f983e3e5be1,34109,1733618786289 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK], DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK]] 2024-12-08T00:47:01,426 INFO [FSHLog-0-hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557-prefix:0f983e3e5be1,34109,1733618786289 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK], DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK]] 2024-12-08T00:47:03,632 INFO [FSHLog-0-hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557-prefix:0f983e3e5be1,34109,1733618786289 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK], DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK]] 2024-12-08T00:47:05,838 INFO [FSHLog-0-hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557-prefix:0f983e3e5be1,34109,1733618786289 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK], DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK]] 2024-12-08T00:47:05,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34109 {}] regionserver.HRegion(8855): Flush requested on a700cdd56b87791a9367376798e8d385 2024-12-08T00:47:05,839 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a700cdd56b87791a9367376798e8d385 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T00:47:06,043 INFO [FSHLog-0-hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557-prefix:0f983e3e5be1,34109,1733618786289 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK], DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK]] 2024-12-08T00:47:06,053 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/.tmp/info/4369757aaa454c50aa41f3d0b4919b24 is 1080, key is row0008/info:/1733618812773/Put/seqid=0 2024-12-08T00:47:06,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741840_1016 (size=12509) 2024-12-08T00:47:06,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741840_1016 (size=12509) 2024-12-08T00:47:06,061 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/.tmp/info/4369757aaa454c50aa41f3d0b4919b24 2024-12-08T00:47:06,070 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/.tmp/info/4369757aaa454c50aa41f3d0b4919b24 as hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/4369757aaa454c50aa41f3d0b4919b24 2024-12-08T00:47:06,078 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/4369757aaa454c50aa41f3d0b4919b24, entries=7, sequenceid=21, filesize=12.2 K 2024-12-08T00:47:06,282 INFO [FSHLog-0-hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557-prefix:0f983e3e5be1,34109,1733618786289 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK], DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK]] 2024-12-08T00:47:06,282 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for a700cdd56b87791a9367376798e8d385 in 
442ms, sequenceid=21, compaction requested=false 2024-12-08T00:47:06,282 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a700cdd56b87791a9367376798e8d385: 2024-12-08T00:47:06,283 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-12-08T00:47:06,283 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:47:06,285 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/6a55288ab7f04389a20f23675074ae97 because midkey is the same as first or last row 2024-12-08T00:47:08,048 INFO [FSHLog-0-hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557-prefix:0f983e3e5be1,34109,1733618786289 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK], DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK]] 2024-12-08T00:47:08,477 INFO [master/0f983e3e5be1:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-08T00:47:08,478 INFO [master/0f983e3e5be1:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-08T00:47:10,256 INFO [FSHLog-0-hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557-prefix:0f983e3e5be1,34109,1733618786289 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK], DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK]] 2024-12-08T00:47:10,263 WARN [FSHLog-0-hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557-prefix:0f983e3e5be1,34109,1733618786289 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK], DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK]] 2024-12-08T00:47:10,265 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0f983e3e5be1%2C34109%2C1733618786289:(num 1733618818792) roll requested 2024-12-08T00:47:10,266 INFO [regionserver/0f983e3e5be1:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C34109%2C1733618786289.1733618830265 2024-12-08T00:47:10,481 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 213 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK], DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK]] 2024-12-08T00:47:10,481 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:10,481 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:10,481 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:10,481 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:10,481 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
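[Editor's note] The WARN just above requests a WAL roll on the count-based trigger ("count=8, threshold=5"): too many consecutive slow syncs. The entries that follow show the time-based variant as well, where a single sync slower than 5000 ms requests a roll on its own. The sketch below is a rough standalone model of both checks, assuming the thresholds behave roughly as the messages describe; SlowSyncTracker is not an HBase class and the real accounting (which is windowed) differs in detail.

    // Illustrative model of the two roll triggers visible in the log:
    //  - a streak of "slow" syncs reaching the count threshold, and
    //  - any single sync slower than rollThresholdMs.
    public class SlowSyncTracker {
        private final long slowSyncMs;       // a sync slower than this counts as "slow"
        private final int countThreshold;    // e.g. 5: a streak this long requests a roll
        private final long rollThresholdMs;  // e.g. 5000 ms: one sync this slow requests a roll
        private int slowCount;

        public SlowSyncTracker(long slowSyncMs, int countThreshold, long rollThresholdMs) {
            this.slowSyncMs = slowSyncMs;
            this.countThreshold = countThreshold;
            this.rollThresholdMs = rollThresholdMs;
        }

        /** Record one sync's latency; returns true if a log roll should be requested. */
        public boolean recordSync(long costMs) {
            if (costMs > rollThresholdMs) {
                slowCount = 0;
                return true;                 // "time=5012 ms, threshold=5000 ms"
            }
            if (costMs > slowSyncMs) {
                slowCount++;
                if (slowCount > countThreshold) {
                    slowCount = 0;
                    return true;             // "count=8, threshold=5"
                }
            } else {
                slowCount = 0;               // a fast sync resets the streak
            }
            return false;
        }
    }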
2024-12-08T00:47:10,482 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618818792 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618830265 2024-12-08T00:47:10,482 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35101:35101),(127.0.0.1/127.0.0.1:34291:34291)] 2024-12-08T00:47:10,482 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618818792 is not closed yet, will try archiving it next time 2024-12-08T00:47:10,483 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618798700 to hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/oldWALs/0f983e3e5be1%2C34109%2C1733618786289.1733618798700 2024-12-08T00:47:10,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741839_1015 (size=7739) 2024-12-08T00:47:10,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741839_1015 (size=7739) 2024-12-08T00:47:12,466 INFO [FSHLog-0-hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557-prefix:0f983e3e5be1,34109,1733618786289 {}] wal.AbstractFSWAL(1368): Slow sync cost: 203 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK], DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK]] 2024-12-08T00:47:14,019 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region a700cdd56b87791a9367376798e8d385, had cached 0 bytes from a total of 25018 2024-12-08T00:47:14,674 INFO [FSHLog-0-hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557-prefix:0f983e3e5be1,34109,1733618786289 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK], DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK]] 2024-12-08T00:47:16,880 INFO [FSHLog-0-hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557-prefix:0f983e3e5be1,34109,1733618786289 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK], DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK]] 2024-12-08T00:47:19,087 INFO [FSHLog-0-hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557-prefix:0f983e3e5be1,34109,1733618786289 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK], 
DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK]] 2024-12-08T00:47:21,090 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-08T00:47:21,091 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C34109%2C1733618786289.1733618841091 2024-12-08T00:47:24,768 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-08T00:47:26,108 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5012 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK], DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK]] 2024-12-08T00:47:26,111 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5012 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK], DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK]] 2024-12-08T00:47:26,111 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0f983e3e5be1%2C34109%2C1733618786289:(num 1733618841091) roll requested 2024-12-08T00:47:26,111 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:26,112 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:26,112 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:26,112 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:26,112 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:26,113 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618830265 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618841091 2024-12-08T00:47:26,114 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34291:34291),(127.0.0.1/127.0.0.1:35101:35101)] 2024-12-08T00:47:26,114 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618830265 is not closed yet, will try archiving it next time 2024-12-08T00:47:26,114 INFO [regionserver/0f983e3e5be1:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C34109%2C1733618786289.1733618846114 2024-12-08T00:47:26,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741841_1017 (size=4753) 2024-12-08T00:47:26,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741841_1017 (size=4753) 2024-12-08T00:47:31,119 INFO [FSHLog-0-hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557-prefix:0f983e3e5be1,34109,1733618786289 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK], DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK]] 2024-12-08T00:47:31,119 WARN [FSHLog-0-hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557-prefix:0f983e3e5be1,34109,1733618786289 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK], DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK]] 2024-12-08T00:47:31,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34109 {}] regionserver.HRegion(8855): Flush requested on a700cdd56b87791a9367376798e8d385 2024-12-08T00:47:31,120 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a700cdd56b87791a9367376798e8d385 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T00:47:31,164 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5045 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK], DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK]] 2024-12-08T00:47:31,164 WARN [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5045 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK], DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK]] 2024-12-08T00:47:33,122 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-08T00:47:36,155 INFO [FSHLog-0-hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557-prefix:0f983e3e5be1,34109,1733618786289 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5032 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK], DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK]] 2024-12-08T00:47:36,155 WARN [FSHLog-0-hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557-prefix:0f983e3e5be1,34109,1733618786289 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5032 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK], DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK]] 2024-12-08T00:47:36,155 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:36,156 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:36,157 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:36,157 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:36,158 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:36,159 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618841091 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618846114 2024-12-08T00:47:36,160 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34291:34291),(127.0.0.1/127.0.0.1:35101:35101)] 2024-12-08T00:47:36,160 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618841091 is not closed yet, will try archiving it next time 2024-12-08T00:47:36,160 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0f983e3e5be1%2C34109%2C1733618786289:(num 1733618846114) roll requested 2024-12-08T00:47:36,161 INFO [regionserver/0f983e3e5be1:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C34109%2C1733618786289.1733618856161 2024-12-08T00:47:36,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741842_1018 (size=1569) 2024-12-08T00:47:36,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741842_1018 (size=1569) 2024-12-08T00:47:36,164 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/.tmp/info/ee1c5a6a63934ff4b7dc453dfa7f5305 is 1080, key is row0015/info:/1733618827844/Put/seqid=0 2024-12-08T00:47:36,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741844_1020 (size=12509) 2024-12-08T00:47:36,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741844_1020 (size=12509) 2024-12-08T00:47:36,171 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/.tmp/info/ee1c5a6a63934ff4b7dc453dfa7f5305 2024-12-08T00:47:36,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/.tmp/info/ee1c5a6a63934ff4b7dc453dfa7f5305 as hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/ee1c5a6a63934ff4b7dc453dfa7f5305 2024-12-08T00:47:36,188 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/ee1c5a6a63934ff4b7dc453dfa7f5305, entries=7, sequenceid=31, filesize=12.2 K 2024-12-08T00:47:41,211 INFO [FSHLog-0-hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557-prefix:0f983e3e5be1,34109,1733618786289 {}] 
wal.AbstractFSWAL(1368): Slow sync cost: 5022 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK], DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK]] 2024-12-08T00:47:41,212 WARN [FSHLog-0-hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557-prefix:0f983e3e5be1,34109,1733618786289 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5022 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK], DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK]] 2024-12-08T00:47:41,212 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for a700cdd56b87791a9367376798e8d385 in 10091ms, sequenceid=31, compaction requested=true 2024-12-08T00:47:41,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a700cdd56b87791a9367376798e8d385: 2024-12-08T00:47:41,213 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-12-08T00:47:41,213 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:47:41,213 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/6a55288ab7f04389a20f23675074ae97 because midkey is the same as first or last row 2024-12-08T00:47:41,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a700cdd56b87791a9367376798e8d385:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:47:41,217 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5052 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK], DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK]] 2024-12-08T00:47:41,217 WARN [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5052 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33661,DS-bf3369fc-a168-4c1b-a21c-8becf5bc2840,DISK], DatanodeInfoWithStorage[127.0.0.1:33503,DS-eeb51b74-7f1b-4d41-8453-2aceb794d2f3,DISK]] 2024-12-08T00:47:41,218 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:41,218 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:41,218 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:41,218 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:41,219 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:41,219 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618846114 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618856161 
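[Editor's note] The split-policy DEBUG lines above show a two-step decision: the region's total store size (36.6 K) exceeds the 16.0 K check size, so a split would be warranted, but it is rejected because the candidate store file's midkey equals its first or last row, so no usable split point exists. Below is a compact sketch of that decision over a toy notion of "store file"; it is not the actual ConstantSizeRegionSplitPolicy or StoreUtils code.

    import java.util.Arrays;
    import java.util.List;

    // Illustrative split decision: size check first, then a usable-midkey check.
    public class SplitDecision {
        /** A toy store file: just its size and its first/mid/last row keys. */
        public record StoreFile(long sizeBytes, String firstRow, String midRow, String lastRow) {}

        /** True when total store size exceeds the configured check size ("sumSize" vs "sizeToCheck"). */
        public static boolean shouldSplit(List<StoreFile> files, long sizeToCheck) {
            long sumSize = files.stream().mapToLong(StoreFile::sizeBytes).sum();
            return sumSize > sizeToCheck;
        }

        /** Returns a split row, or null when the midkey equals the first or last row (cannot split). */
        public static String splitRow(StoreFile largest) {
            String mid = largest.midRow();
            if (mid.equals(largest.firstRow()) || mid.equals(largest.lastRow())) {
                return null;   // "cannot split ... because midkey is the same as first or last row"
            }
            return mid;
        }

        public static void main(String[] args) {
            StoreFile f = new StoreFile(27_710, "row0001", "row0001", "row0029");
            System.out.println(shouldSplit(Arrays.asList(f), 16 * 1024)); // true: 27.1 K > 16.0 K
            System.out.println(splitRow(f));                              // null: midkey == first row
        }
    }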
2024-12-08T00:47:41,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:47:41,220 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34291:34291),(127.0.0.1/127.0.0.1:35101:35101)] 2024-12-08T00:47:41,220 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618846114 is not closed yet, will try archiving it next time 2024-12-08T00:47:41,221 DEBUG [RS:0;0f983e3e5be1:34109-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:47:41,221 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618818792 to hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/oldWALs/0f983e3e5be1%2C34109%2C1733618786289.1733618818792 2024-12-08T00:47:41,221 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0f983e3e5be1%2C34109%2C1733618786289:(num 1733618856161) roll requested 2024-12-08T00:47:41,221 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C34109%2C1733618786289.1733618861221 2024-12-08T00:47:41,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741843_1019 (size=438) 2024-12-08T00:47:41,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741843_1019 (size=438) 2024-12-08T00:47:41,224 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618830265 to hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/oldWALs/0f983e3e5be1%2C34109%2C1733618786289.1733618830265 2024-12-08T00:47:41,225 DEBUG [RS:0;0f983e3e5be1:34109-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:47:41,226 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618841091 to hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/oldWALs/0f983e3e5be1%2C34109%2C1733618786289.1733618841091 2024-12-08T00:47:41,227 DEBUG [RS:0;0f983e3e5be1:34109-shortCompactions-0 {}] regionserver.HStore(1541): a700cdd56b87791a9367376798e8d385/info is initiating minor compaction (all files) 2024-12-08T00:47:41,227 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618846114 to 
hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/oldWALs/0f983e3e5be1%2C34109%2C1733618786289.1733618846114 2024-12-08T00:47:41,227 INFO [RS:0;0f983e3e5be1:34109-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a700cdd56b87791a9367376798e8d385/info in TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385. 2024-12-08T00:47:41,228 INFO [RS:0;0f983e3e5be1:34109-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/6a55288ab7f04389a20f23675074ae97, hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/4369757aaa454c50aa41f3d0b4919b24, hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/ee1c5a6a63934ff4b7dc453dfa7f5305] into tmpdir=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/.tmp, totalSize=36.6 K 2024-12-08T00:47:41,229 DEBUG [RS:0;0f983e3e5be1:34109-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6a55288ab7f04389a20f23675074ae97, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733618798724 2024-12-08T00:47:41,230 DEBUG [RS:0;0f983e3e5be1:34109-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4369757aaa454c50aa41f3d0b4919b24, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1733618812773 2024-12-08T00:47:41,231 DEBUG [RS:0;0f983e3e5be1:34109-shortCompactions-0 {}] compactions.Compactor(225): Compacting ee1c5a6a63934ff4b7dc453dfa7f5305, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733618827844 2024-12-08T00:47:41,234 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:41,235 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:41,235 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:41,235 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:41,235 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:41,235 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618856161 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618861221 2024-12-08T00:47:41,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741845_1021 (size=93) 2024-12-08T00:47:41,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741845_1021 (size=93) 2024-12-08T00:47:41,238 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34291:34291),(127.0.0.1/127.0.0.1:35101:35101)] 2024-12-08T00:47:41,238 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618856161 is not closed yet, will try archiving it next time 2024-12-08T00:47:41,238 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618856161 to hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/oldWALs/0f983e3e5be1%2C34109%2C1733618786289.1733618856161 2024-12-08T00:47:41,239 INFO [regionserver/0f983e3e5be1:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C34109%2C1733618786289.1733618861239 2024-12-08T00:47:41,249 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:41,249 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:41,249 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:41,249 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:41,249 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:47:41,250 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618861221 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618861239 2024-12-08T00:47:41,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741846_1022 (size=1258) 2024-12-08T00:47:41,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741846_1022 (size=1258) 2024-12-08T00:47:41,258 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34291:34291),(127.0.0.1/127.0.0.1:35101:35101)] 2024-12-08T00:47:41,259 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/WALs/0f983e3e5be1,34109,1733618786289/0f983e3e5be1%2C34109%2C1733618786289.1733618861221 is not closed yet, will try archiving it next time 2024-12-08T00:47:41,268 INFO [RS:0;0f983e3e5be1:34109-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a700cdd56b87791a9367376798e8d385#info#compaction#3 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:47:41,269 DEBUG [RS:0;0f983e3e5be1:34109-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/.tmp/info/0a5f6ef179b44770a5b882e486b248a0 is 1080, key is row0001/info:/1733618798724/Put/seqid=0 2024-12-08T00:47:41,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741848_1024 (size=27710) 2024-12-08T00:47:41,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741848_1024 (size=27710) 2024-12-08T00:47:41,285 DEBUG [RS:0;0f983e3e5be1:34109-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/.tmp/info/0a5f6ef179b44770a5b882e486b248a0 as hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/0a5f6ef179b44770a5b882e486b248a0 2024-12-08T00:47:41,300 INFO [RS:0;0f983e3e5be1:34109-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a700cdd56b87791a9367376798e8d385/info of a700cdd56b87791a9367376798e8d385 into 0a5f6ef179b44770a5b882e486b248a0(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:47:41,300 DEBUG [RS:0;0f983e3e5be1:34109-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a700cdd56b87791a9367376798e8d385: 2024-12-08T00:47:41,301 INFO [RS:0;0f983e3e5be1:34109-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385., storeName=a700cdd56b87791a9367376798e8d385/info, priority=13, startTime=1733618861217; duration=0sec 2024-12-08T00:47:41,302 DEBUG [RS:0;0f983e3e5be1:34109-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-08T00:47:41,302 DEBUG [RS:0;0f983e3e5be1:34109-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:47:41,302 DEBUG [RS:0;0f983e3e5be1:34109-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/0a5f6ef179b44770a5b882e486b248a0 because midkey is the same as first or last row 2024-12-08T00:47:41,302 DEBUG [RS:0;0f983e3e5be1:34109-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-08T00:47:41,302 DEBUG [RS:0;0f983e3e5be1:34109-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:47:41,302 DEBUG [RS:0;0f983e3e5be1:34109-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/0a5f6ef179b44770a5b882e486b248a0 because midkey is the same as first or last row 2024-12-08T00:47:41,302 DEBUG [RS:0;0f983e3e5be1:34109-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-08T00:47:41,302 DEBUG [RS:0;0f983e3e5be1:34109-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:47:41,302 DEBUG [RS:0;0f983e3e5be1:34109-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/0a5f6ef179b44770a5b882e486b248a0 because midkey is the same as first or last row 2024-12-08T00:47:41,302 DEBUG [RS:0;0f983e3e5be1:34109-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:47:41,303 DEBUG [RS:0;0f983e3e5be1:34109-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a700cdd56b87791a9367376798e8d385:info 2024-12-08T00:47:53,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34109 {}] regionserver.HRegion(8855): Flush requested on a700cdd56b87791a9367376798e8d385 2024-12-08T00:47:53,287 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a700cdd56b87791a9367376798e8d385 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T00:47:53,294 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/.tmp/info/fc9f25534ab849beaa82ad962da1a0aa is 1080, key is row0022/info:/1733618861240/Put/seqid=0 2024-12-08T00:47:53,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741849_1025 (size=12509) 2024-12-08T00:47:53,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741849_1025 (size=12509) 2024-12-08T00:47:53,302 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/.tmp/info/fc9f25534ab849beaa82ad962da1a0aa 2024-12-08T00:47:53,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/.tmp/info/fc9f25534ab849beaa82ad962da1a0aa as hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/fc9f25534ab849beaa82ad962da1a0aa 2024-12-08T00:47:53,321 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/fc9f25534ab849beaa82ad962da1a0aa, entries=7, sequenceid=42, filesize=12.2 K 2024-12-08T00:47:53,322 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for a700cdd56b87791a9367376798e8d385 in 35ms, sequenceid=42, compaction requested=false 2024-12-08T00:47:53,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a700cdd56b87791a9367376798e8d385: 2024-12-08T00:47:53,323 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-12-08T00:47:53,323 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:47:53,323 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/0a5f6ef179b44770a5b882e486b248a0 because midkey is the same as first or last row 2024-12-08T00:47:54,768 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-08T00:47:59,019 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region a700cdd56b87791a9367376798e8d385, had cached 0 bytes from a total of 40219 2024-12-08T00:48:01,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T00:48:01,313 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
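[Editor's note] The flush at 00:47:53 does not request a compaction, unlike the earlier ones, presumably because the store now holds fewer files than the selection policy needs: the minor compaction logged a little earlier merged three 12.2 K flush files into a single 27.1 K file (throttled well under the 50.00 MB/second limit). Conceptually, that compaction merge-sorts the selected files' sorted entries into one new file and swaps it in. The bare-bones sketch below models only the merge step over sorted in-memory "files"; selection policy, throughput throttling, and HFile handling are all omitted.

    import java.util.*;

    // Illustrative compaction merge: several sorted inputs merged into one sorted output,
    // the way the three flushed files above become a single compacted file.
    public class MiniCompactor {
        /** Merge sorted (row -> value) maps; for duplicate rows the later input wins, like a newer cell. */
        public static NavigableMap<String, String> compact(List<NavigableMap<String, String>> inputs) {
            NavigableMap<String, String> out = new TreeMap<>();
            for (NavigableMap<String, String> file : inputs) {   // inputs ordered oldest -> newest
                out.putAll(file);                                // newer values overwrite older ones
            }
            return out;
        }

        public static void main(String[] args) {
            NavigableMap<String, String> f1 = new TreeMap<>(Map.of("row0001", "v1", "row0002", "v1"));
            NavigableMap<String, String> f2 = new TreeMap<>(Map.of("row0002", "v2", "row0008", "v1"));
            NavigableMap<String, String> f3 = new TreeMap<>(Map.of("row0015", "v1"));
            System.out.println(compact(List.of(f1, f2, f3)));
            // {row0001=v1, row0002=v2, row0008=v1, row0015=v1}
        }
    }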
2024-12-08T00:48:01,313 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:48:01,324 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:48:01,324 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:48:01,324 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
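[Editor's note] The call stack above is the normal JUnit teardown path: AbstractTestLogRolling.tearDown calls HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection before stopping the master and region server. The sketch below shows that test lifecycle in outline, assuming the HBaseTestingUtil start/shutdown API referenced in the stack trace; the actual log-rolling assertions are elided.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    // Sketch of the test lifecycle behind the shutdown stack trace above (JUnit 4,
    // HBaseTestingUtil as referenced in the trace). The real assertions live in
    // AbstractTestLogRolling and are omitted here.
    public class MiniClusterLifecycleSketch {
        private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

        @Before
        public void setUp() throws Exception {
            testUtil.startMiniCluster();      // boots ZooKeeper, HDFS, a master and a region server
        }

        @Test
        public void testSomethingAgainstTheMiniCluster() throws Exception {
            // ... create a table, write rows, force WAL rolls, assert on the results ...
        }

        @After
        public void tearDown() throws Exception {
            testUtil.shutdownMiniCluster();   // produces the "Shutting down minicluster" lines above
        }
    }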
2024-12-08T00:48:01,324 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T00:48:01,324 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1319998913, stopped=false 2024-12-08T00:48:01,325 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0f983e3e5be1,40409,1733618785607 2024-12-08T00:48:01,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T00:48:01,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34109-0x10002f118c80001, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T00:48:01,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34109-0x10002f118c80001, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:01,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:01,389 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T00:48:01,389 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T00:48:01,390 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:48:01,390 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:48:01,390 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:48:01,390 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34109-0x10002f118c80001, quorum=127.0.0.1:49878, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:48:01,391 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0f983e3e5be1,34109,1733618786289' ***** 2024-12-08T00:48:01,391 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T00:48:01,392 INFO [RS:0;0f983e3e5be1:34109 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T00:48:01,393 INFO [RS:0;0f983e3e5be1:34109 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T00:48:01,393 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T00:48:01,393 INFO [RS:0;0f983e3e5be1:34109 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T00:48:01,393 INFO [RS:0;0f983e3e5be1:34109 {}] regionserver.HRegionServer(3091): Received CLOSE for a700cdd56b87791a9367376798e8d385 2024-12-08T00:48:01,394 INFO [RS:0;0f983e3e5be1:34109 {}] regionserver.HRegionServer(959): stopping server 0f983e3e5be1,34109,1733618786289 2024-12-08T00:48:01,394 INFO [RS:0;0f983e3e5be1:34109 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T00:48:01,394 INFO [RS:0;0f983e3e5be1:34109 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0f983e3e5be1:34109. 
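[Editor's note] The shutdown fans out through ZooKeeper: deleting /hbase/running fires NodeDeleted events on the master and region server watchers above, and each watcher then re-sets a watch on the now-missing znode ("Set watcher on znode that does not yet exist"). Below is a small standalone example of that pattern using the plain ZooKeeper client API; the quorum string and session timeout are placeholders, and this is not HBase's ZKWatcher implementation.

    import java.io.IOException;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    // Watches a "cluster is up" znode (here /hbase/running) and reacts when it is
    // deleted, re-arming the watch each time, as the ZKWatcher lines above do.
    public class RunningNodeWatcher implements Watcher {
        private static final String RUNNING_ZNODE = "/hbase/running";
        private final ZooKeeper zk;

        public RunningNodeWatcher(String quorum) throws IOException {
            // 127.0.0.1:49878 in the log; the 30 s session timeout is an arbitrary choice here.
            this.zk = new ZooKeeper(quorum, 30_000, this);
        }

        /** Sets (or re-sets) a watch; exists() works even if the znode is currently absent. */
        public void watchRunningNode() throws KeeperException, InterruptedException {
            zk.exists(RUNNING_ZNODE, this);
        }

        @Override
        public void process(WatchedEvent event) {
            if (event.getType() == Watcher.Event.EventType.NodeDeleted
                    && RUNNING_ZNODE.equals(event.getPath())) {
                System.out.println("Cluster shutdown requested: " + RUNNING_ZNODE + " was deleted");
                // A real server would begin an orderly stop here.
            }
            try {
                watchRunningNode();   // ZooKeeper watches are one-shot, so always re-arm
            } catch (KeeperException | InterruptedException e) {
                System.err.println("Failed to re-arm watch: " + e);
            }
        }
    }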
2024-12-08T00:48:01,395 DEBUG [RS:0;0f983e3e5be1:34109 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:48:01,395 DEBUG [RS:0;0f983e3e5be1:34109 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:48:01,395 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a700cdd56b87791a9367376798e8d385, disabling compactions & flushes 2024-12-08T00:48:01,395 INFO [RS:0;0f983e3e5be1:34109 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T00:48:01,395 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385. 2024-12-08T00:48:01,395 INFO [RS:0;0f983e3e5be1:34109 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T00:48:01,395 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385. 2024-12-08T00:48:01,395 INFO [RS:0;0f983e3e5be1:34109 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T00:48:01,395 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385. after waiting 0 ms 2024-12-08T00:48:01,395 INFO [RS:0;0f983e3e5be1:34109 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T00:48:01,395 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385. 
2024-12-08T00:48:01,396 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing a700cdd56b87791a9367376798e8d385 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-12-08T00:48:01,396 INFO [RS:0;0f983e3e5be1:34109 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-08T00:48:01,396 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T00:48:01,396 DEBUG [RS:0;0f983e3e5be1:34109 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, a700cdd56b87791a9367376798e8d385=TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385.} 2024-12-08T00:48:01,396 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T00:48:01,397 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T00:48:01,397 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T00:48:01,397 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T00:48:01,397 DEBUG [RS:0;0f983e3e5be1:34109 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, a700cdd56b87791a9367376798e8d385 2024-12-08T00:48:01,397 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-12-08T00:48:01,402 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/.tmp/info/a3a1c6cd5d3b4e60be4d23f4fdc6c47f is 1080, key is row0029/info:/1733618875290/Put/seqid=0 2024-12-08T00:48:01,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741850_1026 (size=8193) 2024-12-08T00:48:01,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741850_1026 (size=8193) 2024-12-08T00:48:01,409 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/.tmp/info/a3a1c6cd5d3b4e60be4d23f4fdc6c47f 2024-12-08T00:48:01,417 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/hbase/meta/1588230740/.tmp/info/dc5f40afd71b4ca18afec3d7941ed4db is 195, key is 
TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385./info:regioninfo/1733618789042/Put/seqid=0 2024-12-08T00:48:01,419 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/.tmp/info/a3a1c6cd5d3b4e60be4d23f4fdc6c47f as hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/a3a1c6cd5d3b4e60be4d23f4fdc6c47f 2024-12-08T00:48:01,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741851_1027 (size=7016) 2024-12-08T00:48:01,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741851_1027 (size=7016) 2024-12-08T00:48:01,424 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/hbase/meta/1588230740/.tmp/info/dc5f40afd71b4ca18afec3d7941ed4db 2024-12-08T00:48:01,428 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/a3a1c6cd5d3b4e60be4d23f4fdc6c47f, entries=3, sequenceid=48, filesize=8.0 K 2024-12-08T00:48:01,429 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for a700cdd56b87791a9367376798e8d385 in 33ms, sequenceid=48, compaction requested=true 2024-12-08T00:48:01,430 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/6a55288ab7f04389a20f23675074ae97, hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/4369757aaa454c50aa41f3d0b4919b24, hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/ee1c5a6a63934ff4b7dc453dfa7f5305] to archive 2024-12-08T00:48:01,433 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-08T00:48:01,437 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/6a55288ab7f04389a20f23675074ae97 to hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/archive/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/6a55288ab7f04389a20f23675074ae97 2024-12-08T00:48:01,438 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/4369757aaa454c50aa41f3d0b4919b24 to hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/archive/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/4369757aaa454c50aa41f3d0b4919b24 2024-12-08T00:48:01,440 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/ee1c5a6a63934ff4b7dc453dfa7f5305 to hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/archive/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/info/ee1c5a6a63934ff4b7dc453dfa7f5305 2024-12-08T00:48:01,446 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/hbase/meta/1588230740/.tmp/ns/71bb268aa96649f49931bc16264ad30a is 43, key is default/ns:d/1733618788339/Put/seqid=0 2024-12-08T00:48:01,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741852_1028 (size=5153) 2024-12-08T00:48:01,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741852_1028 (size=5153) 2024-12-08T00:48:01,452 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/hbase/meta/1588230740/.tmp/ns/71bb268aa96649f49931bc16264ad30a 2024-12-08T00:48:01,451 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=0f983e3e5be1:40409 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-12-08T00:48:01,455 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [6a55288ab7f04389a20f23675074ae97=12509, 4369757aaa454c50aa41f3d0b4919b24=12509, ee1c5a6a63934ff4b7dc453dfa7f5305=12509] 2024-12-08T00:48:01,461 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/default/TestLogRolling-testSlowSyncLogRolling/a700cdd56b87791a9367376798e8d385/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-12-08T00:48:01,463 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385. 2024-12-08T00:48:01,463 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a700cdd56b87791a9367376798e8d385: Waiting for close lock at 1733618881394Running coprocessor pre-close hooks at 1733618881395 (+1 ms)Disabling compacts and flushes for region at 1733618881395Disabling writes for close at 1733618881395Obtaining lock to block concurrent updates at 1733618881396 (+1 ms)Preparing flush snapshotting stores in a700cdd56b87791a9367376798e8d385 at 1733618881396Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1733618881396Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385. at 1733618881398 (+2 ms)Flushing a700cdd56b87791a9367376798e8d385/info: creating writer at 1733618881398Flushing a700cdd56b87791a9367376798e8d385/info: appending metadata at 1733618881402 (+4 ms)Flushing a700cdd56b87791a9367376798e8d385/info: closing flushed file at 1733618881402Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@29325c82: reopening flushed file at 1733618881418 (+16 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for a700cdd56b87791a9367376798e8d385 in 33ms, sequenceid=48, compaction requested=true at 1733618881429 (+11 ms)Writing region close event to WAL at 1733618881456 (+27 ms)Running coprocessor post-close hooks at 1733618881461 (+5 ms)Closed at 1733618881463 (+2 ms) 2024-12-08T00:48:01,464 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1733618788593.a700cdd56b87791a9367376798e8d385. 
2024-12-08T00:48:01,473 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/hbase/meta/1588230740/.tmp/table/0aa66ea2c515485aa24c4b200dcecf55 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1733618789056/Put/seqid=0 2024-12-08T00:48:01,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741853_1029 (size=5396) 2024-12-08T00:48:01,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741853_1029 (size=5396) 2024-12-08T00:48:01,479 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/hbase/meta/1588230740/.tmp/table/0aa66ea2c515485aa24c4b200dcecf55 2024-12-08T00:48:01,486 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/hbase/meta/1588230740/.tmp/info/dc5f40afd71b4ca18afec3d7941ed4db as hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/hbase/meta/1588230740/info/dc5f40afd71b4ca18afec3d7941ed4db 2024-12-08T00:48:01,494 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/hbase/meta/1588230740/info/dc5f40afd71b4ca18afec3d7941ed4db, entries=10, sequenceid=11, filesize=6.9 K 2024-12-08T00:48:01,496 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/hbase/meta/1588230740/.tmp/ns/71bb268aa96649f49931bc16264ad30a as hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/hbase/meta/1588230740/ns/71bb268aa96649f49931bc16264ad30a 2024-12-08T00:48:01,497 INFO [regionserver/0f983e3e5be1:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-08T00:48:01,497 INFO [regionserver/0f983e3e5be1:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-08T00:48:01,499 INFO [regionserver/0f983e3e5be1:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:48:01,503 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/hbase/meta/1588230740/ns/71bb268aa96649f49931bc16264ad30a, entries=2, sequenceid=11, filesize=5.0 K 2024-12-08T00:48:01,504 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/hbase/meta/1588230740/.tmp/table/0aa66ea2c515485aa24c4b200dcecf55 as 
hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/hbase/meta/1588230740/table/0aa66ea2c515485aa24c4b200dcecf55 2024-12-08T00:48:01,512 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/hbase/meta/1588230740/table/0aa66ea2c515485aa24c4b200dcecf55, entries=2, sequenceid=11, filesize=5.3 K 2024-12-08T00:48:01,514 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 117ms, sequenceid=11, compaction requested=false 2024-12-08T00:48:01,520 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-08T00:48:01,520 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T00:48:01,520 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T00:48:01,521 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733618881396Running coprocessor pre-close hooks at 1733618881396Disabling compacts and flushes for region at 1733618881396Disabling writes for close at 1733618881397 (+1 ms)Obtaining lock to block concurrent updates at 1733618881397Preparing flush snapshotting stores in 1588230740 at 1733618881397Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1733618881398 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733618881399 (+1 ms)Flushing 1588230740/info: creating writer at 1733618881399Flushing 1588230740/info: appending metadata at 1733618881417 (+18 ms)Flushing 1588230740/info: closing flushed file at 1733618881417Flushing 1588230740/ns: creating writer at 1733618881432 (+15 ms)Flushing 1588230740/ns: appending metadata at 1733618881446 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733618881446Flushing 1588230740/table: creating writer at 1733618881460 (+14 ms)Flushing 1588230740/table: appending metadata at 1733618881473 (+13 ms)Flushing 1588230740/table: closing flushed file at 1733618881473Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@52c08952: reopening flushed file at 1733618881485 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@78b8311a: reopening flushed file at 1733618881494 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5459884b: reopening flushed file at 1733618881503 (+9 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 117ms, sequenceid=11, compaction requested=false at 1733618881514 (+11 ms)Writing region close event to WAL at 1733618881515 (+1 ms)Running coprocessor post-close hooks at 1733618881520 (+5 ms)Closed at 1733618881520 2024-12-08T00:48:01,521 DEBUG 
[RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T00:48:01,598 INFO [RS:0;0f983e3e5be1:34109 {}] regionserver.HRegionServer(976): stopping server 0f983e3e5be1,34109,1733618786289; all regions closed. 2024-12-08T00:48:01,600 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:01,600 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:01,601 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:01,601 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:01,601 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:01,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741834_1010 (size=3066) 2024-12-08T00:48:01,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741834_1010 (size=3066) 2024-12-08T00:48:01,611 DEBUG [RS:0;0f983e3e5be1:34109 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/oldWALs 2024-12-08T00:48:01,611 INFO [RS:0;0f983e3e5be1:34109 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0f983e3e5be1%2C34109%2C1733618786289.meta:.meta(num 1733618788157) 2024-12-08T00:48:01,612 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:01,612 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:01,612 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:01,612 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:01,613 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:01,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741847_1023 (size=12695) 2024-12-08T00:48:01,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741847_1023 (size=12695) 2024-12-08T00:48:01,618 DEBUG [RS:0;0f983e3e5be1:34109 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/oldWALs 2024-12-08T00:48:01,618 INFO [RS:0;0f983e3e5be1:34109 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0f983e3e5be1%2C34109%2C1733618786289:(num 1733618861239) 2024-12-08T00:48:01,618 DEBUG [RS:0;0f983e3e5be1:34109 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:48:01,618 INFO [RS:0;0f983e3e5be1:34109 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:48:01,619 INFO [RS:0;0f983e3e5be1:34109 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T00:48:01,619 INFO [RS:0;0f983e3e5be1:34109 {}] hbase.ChoreService(370): Chore service for: regionserver/0f983e3e5be1:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T00:48:01,619 INFO [RS:0;0f983e3e5be1:34109 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T00:48:01,619 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T00:48:01,620 INFO [RS:0;0f983e3e5be1:34109 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34109 2024-12-08T00:48:01,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34109-0x10002f118c80001, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0f983e3e5be1,34109,1733618786289 2024-12-08T00:48:01,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T00:48:01,655 INFO [RS:0;0f983e3e5be1:34109 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T00:48:01,715 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0f983e3e5be1,34109,1733618786289] 2024-12-08T00:48:01,722 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0f983e3e5be1,34109,1733618786289 already deleted, retry=false 2024-12-08T00:48:01,723 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0f983e3e5be1,34109,1733618786289 expired; onlineServers=0 2024-12-08T00:48:01,723 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0f983e3e5be1,40409,1733618785607' ***** 2024-12-08T00:48:01,723 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T00:48:01,723 INFO [M:0;0f983e3e5be1:40409 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T00:48:01,723 INFO [M:0;0f983e3e5be1:40409 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T00:48:01,724 DEBUG [M:0;0f983e3e5be1:40409 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T00:48:01,724 DEBUG [M:0;0f983e3e5be1:40409 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T00:48:01,724 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-08T00:48:01,724 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733618787396 {}] cleaner.HFileCleaner(306): Exit Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733618787396,5,FailOnTimeoutGroup] 2024-12-08T00:48:01,724 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733618787390 {}] cleaner.HFileCleaner(306): Exit Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733618787390,5,FailOnTimeoutGroup] 2024-12-08T00:48:01,725 INFO [M:0;0f983e3e5be1:40409 {}] hbase.ChoreService(370): Chore service for: master/0f983e3e5be1:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T00:48:01,725 INFO [M:0;0f983e3e5be1:40409 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T00:48:01,725 DEBUG [M:0;0f983e3e5be1:40409 {}] master.HMaster(1795): Stopping service threads 2024-12-08T00:48:01,725 INFO [M:0;0f983e3e5be1:40409 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T00:48:01,725 INFO [M:0;0f983e3e5be1:40409 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T00:48:01,726 INFO [M:0;0f983e3e5be1:40409 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T00:48:01,726 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T00:48:01,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T00:48:01,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:01,730 DEBUG [M:0;0f983e3e5be1:40409 {}] zookeeper.ZKUtil(347): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T00:48:01,731 WARN [M:0;0f983e3e5be1:40409 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T00:48:01,732 INFO [M:0;0f983e3e5be1:40409 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/.lastflushedseqids 2024-12-08T00:48:01,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741854_1030 (size=130) 2024-12-08T00:48:01,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741854_1030 (size=130) 2024-12-08T00:48:01,745 INFO [M:0;0f983e3e5be1:40409 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T00:48:01,746 INFO [M:0;0f983e3e5be1:40409 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T00:48:01,746 DEBUG [M:0;0f983e3e5be1:40409 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T00:48:01,746 INFO [M:0;0f983e3e5be1:40409 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:48:01,746 DEBUG [M:0;0f983e3e5be1:40409 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:48:01,746 DEBUG [M:0;0f983e3e5be1:40409 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T00:48:01,746 DEBUG [M:0;0f983e3e5be1:40409 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:48:01,746 INFO [M:0;0f983e3e5be1:40409 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.01 KB heapSize=29.18 KB 2024-12-08T00:48:01,764 DEBUG [M:0;0f983e3e5be1:40409 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0c3c69e7c3ee4c8dbc1640b99f43a51d is 82, key is hbase:meta,,1/info:regioninfo/1733618788228/Put/seqid=0 2024-12-08T00:48:01,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741855_1031 (size=5672) 2024-12-08T00:48:01,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741855_1031 (size=5672) 2024-12-08T00:48:01,770 INFO [M:0;0f983e3e5be1:40409 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0c3c69e7c3ee4c8dbc1640b99f43a51d 2024-12-08T00:48:01,789 DEBUG [M:0;0f983e3e5be1:40409 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c9df2cf631324640aa9b77a098d3340b is 765, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733618789062/Put/seqid=0 2024-12-08T00:48:01,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741856_1032 (size=6246) 2024-12-08T00:48:01,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741856_1032 (size=6246) 2024-12-08T00:48:01,795 INFO [M:0;0f983e3e5be1:40409 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.41 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c9df2cf631324640aa9b77a098d3340b 2024-12-08T00:48:01,802 INFO [M:0;0f983e3e5be1:40409 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c9df2cf631324640aa9b77a098d3340b 2024-12-08T00:48:01,814 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34109-0x10002f118c80001, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:48:01,814 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34109-0x10002f118c80001, quorum=127.0.0.1:49878, baseZNode=/hbase 
Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:48:01,815 INFO [RS:0;0f983e3e5be1:34109 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T00:48:01,815 INFO [RS:0;0f983e3e5be1:34109 {}] regionserver.HRegionServer(1031): Exiting; stopping=0f983e3e5be1,34109,1733618786289; zookeeper connection closed. 2024-12-08T00:48:01,815 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@450cdcd1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@450cdcd1 2024-12-08T00:48:01,816 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-08T00:48:01,817 DEBUG [M:0;0f983e3e5be1:40409 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f1aeef23b4894313aed8da69d590f64f is 69, key is 0f983e3e5be1,34109,1733618786289/rs:state/1733618787408/Put/seqid=0 2024-12-08T00:48:01,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741857_1033 (size=5156) 2024-12-08T00:48:01,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741857_1033 (size=5156) 2024-12-08T00:48:01,823 INFO [M:0;0f983e3e5be1:40409 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f1aeef23b4894313aed8da69d590f64f 2024-12-08T00:48:01,843 DEBUG [M:0;0f983e3e5be1:40409 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/eaeddccad7cc4bf8b02b0bee034ec33c is 52, key is load_balancer_on/state:d/1733618788569/Put/seqid=0 2024-12-08T00:48:01,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741858_1034 (size=5056) 2024-12-08T00:48:01,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741858_1034 (size=5056) 2024-12-08T00:48:01,850 INFO [M:0;0f983e3e5be1:40409 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/eaeddccad7cc4bf8b02b0bee034ec33c 2024-12-08T00:48:01,859 DEBUG [M:0;0f983e3e5be1:40409 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0c3c69e7c3ee4c8dbc1640b99f43a51d as hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0c3c69e7c3ee4c8dbc1640b99f43a51d 2024-12-08T00:48:01,866 INFO [M:0;0f983e3e5be1:40409 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0c3c69e7c3ee4c8dbc1640b99f43a51d, entries=8, sequenceid=59, filesize=5.5 K 2024-12-08T00:48:01,867 DEBUG [M:0;0f983e3e5be1:40409 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c9df2cf631324640aa9b77a098d3340b as hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c9df2cf631324640aa9b77a098d3340b 2024-12-08T00:48:01,874 INFO [M:0;0f983e3e5be1:40409 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c9df2cf631324640aa9b77a098d3340b 2024-12-08T00:48:01,874 INFO [M:0;0f983e3e5be1:40409 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c9df2cf631324640aa9b77a098d3340b, entries=6, sequenceid=59, filesize=6.1 K 2024-12-08T00:48:01,875 DEBUG [M:0;0f983e3e5be1:40409 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f1aeef23b4894313aed8da69d590f64f as hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f1aeef23b4894313aed8da69d590f64f 2024-12-08T00:48:01,881 INFO [M:0;0f983e3e5be1:40409 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f1aeef23b4894313aed8da69d590f64f, entries=1, sequenceid=59, filesize=5.0 K 2024-12-08T00:48:01,882 DEBUG [M:0;0f983e3e5be1:40409 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/eaeddccad7cc4bf8b02b0bee034ec33c as hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/eaeddccad7cc4bf8b02b0bee034ec33c 2024-12-08T00:48:01,890 INFO [M:0;0f983e3e5be1:40409 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/eaeddccad7cc4bf8b02b0bee034ec33c, entries=1, sequenceid=59, filesize=4.9 K 2024-12-08T00:48:01,892 INFO [M:0;0f983e3e5be1:40409 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.01 KB/23564, heapSize ~29.12 KB/29816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 146ms, sequenceid=59, compaction requested=false 2024-12-08T00:48:01,894 INFO [M:0;0f983e3e5be1:40409 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T00:48:01,894 DEBUG [M:0;0f983e3e5be1:40409 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733618881746Disabling compacts and flushes for region at 1733618881746Disabling writes for close at 1733618881746Obtaining lock to block concurrent updates at 1733618881746Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733618881746Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23564, getHeapSize=29816, getOffHeapSize=0, getCellsCount=70 at 1733618881747 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733618881748 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733618881748Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733618881764 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733618881764Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733618881775 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733618881788 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733618881789 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733618881802 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733618881817 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733618881817Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733618881830 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733618881843 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733618881843Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5be05407: reopening flushed file at 1733618881857 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@12b017c4: reopening flushed file at 1733618881866 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42b9b0e: reopening flushed file at 1733618881874 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2e171017: reopening flushed file at 1733618881881 (+7 ms)Finished flush of dataSize ~23.01 KB/23564, heapSize ~29.12 KB/29816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 146ms, sequenceid=59, compaction requested=false at 1733618881892 (+11 ms)Writing region close event to WAL at 1733618881894 (+2 ms)Closed at 1733618881894 2024-12-08T00:48:01,895 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:01,895 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:01,895 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:01,895 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:01,895 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:01,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741830_1006 (size=27961) 2024-12-08T00:48:01,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33503 is added to blk_1073741830_1006 (size=27961) 2024-12-08T00:48:01,898 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T00:48:01,898 INFO [M:0;0f983e3e5be1:40409 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-08T00:48:01,898 INFO [M:0;0f983e3e5be1:40409 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40409 2024-12-08T00:48:01,899 INFO [M:0;0f983e3e5be1:40409 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T00:48:02,029 INFO [M:0;0f983e3e5be1:40409 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T00:48:02,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:48:02,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40409-0x10002f118c80000, quorum=127.0.0.1:49878, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:48:02,082 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@55d18735{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:48:02,085 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@15370523{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:48:02,085 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:48:02,086 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@335a4f9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:48:02,086 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ae73635{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/hadoop.log.dir/,STOPPED} 2024-12-08T00:48:02,089 WARN [BP-1861119566-172.17.0.2-1733618782229 heartbeating to localhost/127.0.0.1:44103 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:48:02,089 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T00:48:02,089 WARN [BP-1861119566-172.17.0.2-1733618782229 heartbeating to localhost/127.0.0.1:44103 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1861119566-172.17.0.2-1733618782229 (Datanode Uuid f4037960-4842-4e5e-8144-af952f06bc4a) service to localhost/127.0.0.1:44103 2024-12-08T00:48:02,089 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:48:02,091 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/cluster_a09d45df-b862-74bd-d733-bc6cec0b7968/data/data3/current/BP-1861119566-172.17.0.2-1733618782229 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:48:02,091 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/cluster_a09d45df-b862-74bd-d733-bc6cec0b7968/data/data4/current/BP-1861119566-172.17.0.2-1733618782229 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:48:02,091 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:48:02,093 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@59e63bea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:48:02,094 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@264a9341{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:48:02,094 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:48:02,094 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bdc1e47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:48:02,094 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2aa5bb6e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/hadoop.log.dir/,STOPPED} 2024-12-08T00:48:02,095 WARN [BP-1861119566-172.17.0.2-1733618782229 heartbeating to localhost/127.0.0.1:44103 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:48:02,095 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T00:48:02,095 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:48:02,095 WARN [BP-1861119566-172.17.0.2-1733618782229 heartbeating to localhost/127.0.0.1:44103 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1861119566-172.17.0.2-1733618782229 (Datanode Uuid 8f21831f-3582-4153-93a7-2deb4a8e217d) service to localhost/127.0.0.1:44103 2024-12-08T00:48:02,096 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/cluster_a09d45df-b862-74bd-d733-bc6cec0b7968/data/data1/current/BP-1861119566-172.17.0.2-1733618782229 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:48:02,096 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/cluster_a09d45df-b862-74bd-d733-bc6cec0b7968/data/data2/current/BP-1861119566-172.17.0.2-1733618782229 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:48:02,096 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:48:02,107 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c77270f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T00:48:02,108 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c5145e6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:48:02,108 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:48:02,108 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46a86f8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:48:02,108 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f2ab976{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/hadoop.log.dir/,STOPPED} 2024-12-08T00:48:02,115 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T00:48:02,143 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T00:48:02,151 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=81 (was 12) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:44103 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:44103 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@4b736004 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/0f983e3e5be1:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44103 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44103 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:44103 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44103 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/0f983e3e5be1:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44103 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:44103 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: regionserver/0f983e3e5be1:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially 
hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=49 (was 164), ProcessCount=11 (was 11), AvailableMemoryMB=17905 (was 18454) 2024-12-08T00:48:02,156 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=82, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=49, ProcessCount=11, AvailableMemoryMB=17904 2024-12-08T00:48:02,157 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T00:48:02,157 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/hadoop.log.dir so I do NOT create it in target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43 2024-12-08T00:48:02,157 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/52e2be55-3edc-d50d-dd17-5d14a52ab0cc/hadoop.tmp.dir so I do NOT create it in target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43 2024-12-08T00:48:02,157 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/cluster_6af9d43c-ffe1-1a4c-1e8f-0a2feb81a439, deleteOnExit=true 2024-12-08T00:48:02,157 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-08T00:48:02,157 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/test.cache.data in system properties and HBase conf 2024-12-08T00:48:02,157 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T00:48:02,157 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/hadoop.log.dir in system properties and HBase conf 2024-12-08T00:48:02,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T00:48:02,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T00:48:02,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T00:48:02,158 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-12-08T00:48:02,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T00:48:02,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T00:48:02,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T00:48:02,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T00:48:02,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T00:48:02,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T00:48:02,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T00:48:02,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T00:48:02,159 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T00:48:02,159 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/nfs.dump.dir in system properties and HBase conf 2024-12-08T00:48:02,159 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/java.io.tmpdir in system properties and HBase conf 2024-12-08T00:48:02,159 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T00:48:02,159 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T00:48:02,159 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T00:48:02,171 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T00:48:02,491 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:48:02,495 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:48:02,496 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:48:02,496 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:48:02,496 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T00:48:02,497 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:48:02,497 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383a5779{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:48:02,498 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2fbdac8b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:48:02,588 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f407acd{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/java.io.tmpdir/jetty-localhost-43957-hadoop-hdfs-3_4_1-tests_jar-_-any-16716995226895211639/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T00:48:02,589 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@685a66bc{HTTP/1.1, (http/1.1)}{localhost:43957} 2024-12-08T00:48:02,589 INFO [Time-limited test {}] server.Server(415): Started @102019ms 2024-12-08T00:48:02,600 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T00:48:02,822 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:48:02,827 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:48:02,828 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:48:02,828 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:48:02,828 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T00:48:02,829 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d31b0cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:48:02,829 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72a59bd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:48:02,920 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6aa2a76b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/java.io.tmpdir/jetty-localhost-44953-hadoop-hdfs-3_4_1-tests_jar-_-any-17618943142170142645/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:48:02,920 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7ba10be8{HTTP/1.1, (http/1.1)}{localhost:44953} 2024-12-08T00:48:02,920 INFO [Time-limited test {}] server.Server(415): Started @102350ms 2024-12-08T00:48:02,922 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T00:48:02,955 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:48:02,959 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:48:02,959 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:48:02,960 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:48:02,960 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T00:48:02,960 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5c6f09a0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:48:02,960 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3509b1e6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:48:03,050 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5a8d969a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/java.io.tmpdir/jetty-localhost-38957-hadoop-hdfs-3_4_1-tests_jar-_-any-10771879733912470387/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:48:03,050 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3bfa6a9b{HTTP/1.1, (http/1.1)}{localhost:38957} 2024-12-08T00:48:03,050 INFO [Time-limited test {}] server.Server(415): Started @102480ms 2024-12-08T00:48:03,052 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T00:48:03,701 WARN [Thread-453 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/cluster_6af9d43c-ffe1-1a4c-1e8f-0a2feb81a439/data/data1/current/BP-1919488677-172.17.0.2-1733618882182/current, will proceed with Du for space computation calculation, 2024-12-08T00:48:03,701 WARN [Thread-454 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/cluster_6af9d43c-ffe1-1a4c-1e8f-0a2feb81a439/data/data2/current/BP-1919488677-172.17.0.2-1733618882182/current, will proceed with Du for space computation calculation, 2024-12-08T00:48:03,720 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T00:48:03,723 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfca19f5e33f6b6bb with lease ID 0xd2ff0a2c2c3900b: Processing first storage report for DS-1771664e-91ec-4da2-8366-87149da0b276 from datanode DatanodeRegistration(127.0.0.1:45159, datanodeUuid=3e11dc30-f146-4b76-b43d-6b37a4dffcb8, infoPort=36355, infoSecurePort=0, ipcPort=42605, storageInfo=lv=-57;cid=testClusterID;nsid=242914900;c=1733618882182) 2024-12-08T00:48:03,723 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfca19f5e33f6b6bb with lease ID 0xd2ff0a2c2c3900b: from storage DS-1771664e-91ec-4da2-8366-87149da0b276 node DatanodeRegistration(127.0.0.1:45159, datanodeUuid=3e11dc30-f146-4b76-b43d-6b37a4dffcb8, infoPort=36355, infoSecurePort=0, ipcPort=42605, storageInfo=lv=-57;cid=testClusterID;nsid=242914900;c=1733618882182), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:48:03,723 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfca19f5e33f6b6bb with lease ID 0xd2ff0a2c2c3900b: Processing first storage report for DS-3b55e15f-3689-4214-a24b-b8069b04aef7 from datanode DatanodeRegistration(127.0.0.1:45159, datanodeUuid=3e11dc30-f146-4b76-b43d-6b37a4dffcb8, infoPort=36355, infoSecurePort=0, ipcPort=42605, storageInfo=lv=-57;cid=testClusterID;nsid=242914900;c=1733618882182) 2024-12-08T00:48:03,723 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfca19f5e33f6b6bb with lease ID 0xd2ff0a2c2c3900b: from storage DS-3b55e15f-3689-4214-a24b-b8069b04aef7 node DatanodeRegistration(127.0.0.1:45159, datanodeUuid=3e11dc30-f146-4b76-b43d-6b37a4dffcb8, infoPort=36355, infoSecurePort=0, ipcPort=42605, storageInfo=lv=-57;cid=testClusterID;nsid=242914900;c=1733618882182), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:48:03,789 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/cluster_6af9d43c-ffe1-1a4c-1e8f-0a2feb81a439/data/data3/current/BP-1919488677-172.17.0.2-1733618882182/current, will proceed with Du for space computation calculation, 2024-12-08T00:48:03,789 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/cluster_6af9d43c-ffe1-1a4c-1e8f-0a2feb81a439/data/data4/current/BP-1919488677-172.17.0.2-1733618882182/current, will proceed with Du for space computation calculation, 2024-12-08T00:48:03,804 WARN [Thread-440 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T00:48:03,807 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc956cbfff3fcb382 with lease ID 0xd2ff0a2c2c3900c: Processing first storage report for DS-81ab795b-d8db-433d-877a-c85e56c7912b from datanode DatanodeRegistration(127.0.0.1:44715, datanodeUuid=e65f2eeb-09db-4e48-a20d-c2a370227aaf, infoPort=35921, infoSecurePort=0, ipcPort=36545, storageInfo=lv=-57;cid=testClusterID;nsid=242914900;c=1733618882182) 2024-12-08T00:48:03,807 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc956cbfff3fcb382 with lease ID 0xd2ff0a2c2c3900c: from storage DS-81ab795b-d8db-433d-877a-c85e56c7912b node DatanodeRegistration(127.0.0.1:44715, datanodeUuid=e65f2eeb-09db-4e48-a20d-c2a370227aaf, infoPort=35921, infoSecurePort=0, ipcPort=36545, storageInfo=lv=-57;cid=testClusterID;nsid=242914900;c=1733618882182), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T00:48:03,807 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc956cbfff3fcb382 with lease ID 0xd2ff0a2c2c3900c: Processing first storage report for DS-f8ce9e4c-0689-435c-b8be-446293a426b5 from datanode DatanodeRegistration(127.0.0.1:44715, datanodeUuid=e65f2eeb-09db-4e48-a20d-c2a370227aaf, infoPort=35921, infoSecurePort=0, ipcPort=36545, storageInfo=lv=-57;cid=testClusterID;nsid=242914900;c=1733618882182) 2024-12-08T00:48:03,807 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc956cbfff3fcb382 with lease ID 0xd2ff0a2c2c3900c: from storage DS-f8ce9e4c-0689-435c-b8be-446293a426b5 node DatanodeRegistration(127.0.0.1:44715, datanodeUuid=e65f2eeb-09db-4e48-a20d-c2a370227aaf, infoPort=35921, infoSecurePort=0, ipcPort=36545, storageInfo=lv=-57;cid=testClusterID;nsid=242914900;c=1733618882182), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:48:03,883 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43 2024-12-08T00:48:03,906 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/cluster_6af9d43c-ffe1-1a4c-1e8f-0a2feb81a439/zookeeper_0, clientPort=56067, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/cluster_6af9d43c-ffe1-1a4c-1e8f-0a2feb81a439/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/cluster_6af9d43c-ffe1-1a4c-1e8f-0a2feb81a439/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T00:48:03,908 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56067 2024-12-08T00:48:03,908 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:48:03,911 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:48:03,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44715 is added to blk_1073741825_1001 (size=7) 2024-12-08T00:48:03,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741825_1001 (size=7) 2024-12-08T00:48:03,924 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0 with version=8 2024-12-08T00:48:03,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/hbase-staging 2024-12-08T00:48:03,926 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0f983e3e5be1:0 server-side Connection retries=45 2024-12-08T00:48:03,927 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:48:03,927 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T00:48:03,927 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T00:48:03,927 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:48:03,927 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T00:48:03,927 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T00:48:03,927 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T00:48:03,928 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41649 2024-12-08T00:48:03,929 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41649 connecting to ZooKeeper ensemble=127.0.0.1:56067 2024-12-08T00:48:03,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:416490x0, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T00:48:03,977 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41649-0x10002f29bdd0000 connected 2024-12-08T00:48:04,056 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:48:04,060 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:48:04,064 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:48:04,064 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0, hbase.cluster.distributed=false 2024-12-08T00:48:04,067 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T00:48:04,067 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41649 2024-12-08T00:48:04,068 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41649 2024-12-08T00:48:04,068 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41649 2024-12-08T00:48:04,068 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41649 2024-12-08T00:48:04,069 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41649 2024-12-08T00:48:04,086 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0f983e3e5be1:0 server-side Connection retries=45 2024-12-08T00:48:04,086 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:48:04,086 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T00:48:04,086 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T00:48:04,086 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:48:04,087 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T00:48:04,087 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T00:48:04,087 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T00:48:04,087 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46241 2024-12-08T00:48:04,089 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46241 connecting to ZooKeeper ensemble=127.0.0.1:56067 2024-12-08T00:48:04,090 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:48:04,093 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:48:04,105 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:462410x0, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T00:48:04,105 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46241-0x10002f29bdd0001 connected 2024-12-08T00:48:04,105 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46241-0x10002f29bdd0001, quorum=127.0.0.1:56067, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:48:04,106 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T00:48:04,106 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T00:48:04,107 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46241-0x10002f29bdd0001, quorum=127.0.0.1:56067, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T00:48:04,109 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46241-0x10002f29bdd0001, quorum=127.0.0.1:56067, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T00:48:04,109 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46241 2024-12-08T00:48:04,109 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46241 2024-12-08T00:48:04,111 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46241 2024-12-08T00:48:04,112 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46241 2024-12-08T00:48:04,112 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46241 2024-12-08T00:48:04,125 DEBUG [M:0;0f983e3e5be1:41649 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0f983e3e5be1:41649 2024-12-08T00:48:04,126 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0f983e3e5be1,41649,1733618883926 2024-12-08T00:48:04,137 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46241-0x10002f29bdd0001, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:48:04,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:48:04,137 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0f983e3e5be1,41649,1733618883926 2024-12-08T00:48:04,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:04,146 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46241-0x10002f29bdd0001, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T00:48:04,146 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46241-0x10002f29bdd0001, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:04,147 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T00:48:04,147 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0f983e3e5be1,41649,1733618883926 from backup master directory 2024-12-08T00:48:04,154 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46241-0x10002f29bdd0001, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:48:04,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0f983e3e5be1,41649,1733618883926 2024-12-08T00:48:04,154 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
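The entries up to this point record the standard HBase mini-cluster bring-up: HBaseTestingUtil points every Hadoop/YARN/HDFS directory at the per-test data directory, starts an embedded HDFS with two datanodes, a MiniZooKeeperCluster and then the master and regionserver processes. As a rough orientation, a test typically drives that same utility along the lines of the sketch below; this is an illustrative sketch only, assuming the hbase-testing-util artifact (the class is named HBaseTestingUtility on older branches), and the configuration tweak shown is invented.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        Configuration conf = util.getConfiguration();        // shared HBase/HDFS/ZK test configuration
        conf.setInt("hbase.regionserver.handler.count", 3);  // illustrative tweak, not taken from this run
        util.startMiniCluster();                             // boots mini DFS, mini ZooKeeper and HBase
        try {
          // test body would exercise the cluster here
        } finally {
          util.shutdownMiniCluster();                        // tears the whole embedded stack down again
        }
      }
    }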
2024-12-08T00:48:04,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:48:04,154 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0f983e3e5be1,41649,1733618883926 2024-12-08T00:48:04,159 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/hbase.id] with ID: 1d1b461b-1b33-41be-8927-4209f9e6a3ac 2024-12-08T00:48:04,159 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/.tmp/hbase.id 2024-12-08T00:48:04,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44715 is added to blk_1073741826_1002 (size=42) 2024-12-08T00:48:04,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741826_1002 (size=42) 2024-12-08T00:48:04,165 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/.tmp/hbase.id]:[hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/hbase.id] 2024-12-08T00:48:04,179 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:48:04,179 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-08T00:48:04,181 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
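The cluster ID entries just above show the create-in-.tmp-then-move pattern: the id file is first written under a temporary path and only renamed to its final name once complete, so readers never see a half-written hbase.id. A rough, hypothetical illustration of that pattern against the plain Hadoop FileSystem API follows; it is not the actual FSUtils code, the paths are invented, and the UUID is simply the one printed in the log.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdSketch {
      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        Path tmp = new Path("/hbase/.tmp/hbase.id");   // hypothetical temporary location
        Path dst = new Path("/hbase/hbase.id");        // hypothetical final location
        try (FSDataOutputStream out = fs.create(tmp, true)) {   // overwrite any stale temp file
          out.write("1d1b461b-1b33-41be-8927-4209f9e6a3ac".getBytes(StandardCharsets.UTF_8));
        }
        if (!fs.rename(tmp, dst)) {   // rename makes the finished file visible at its final path
          throw new IOException("failed to move " + tmp + " to " + dst);
        }
      }
    }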
2024-12-08T00:48:04,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:04,188 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46241-0x10002f29bdd0001, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:04,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741827_1003 (size=196) 2024-12-08T00:48:04,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44715 is added to blk_1073741827_1003 (size=196) 2024-12-08T00:48:04,199 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T00:48:04,200 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T00:48:04,200 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:48:04,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44715 is added to blk_1073741828_1004 (size=1189) 2024-12-08T00:48:04,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741828_1004 (size=1189) 2024-12-08T00:48:04,209 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/data/master/store 2024-12-08T00:48:04,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44715 is added to blk_1073741829_1005 (size=34) 2024-12-08T00:48:04,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741829_1005 (size=34) 2024-12-08T00:48:04,220 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:48:04,220 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T00:48:04,220 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:48:04,220 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:48:04,220 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T00:48:04,220 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:48:04,220 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
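The long dump above is the printed layout of the master-local 'master:store' table (families info, proc, rs and state with their versions, encodings, bloom filters and block sizes). For orientation, here is a hedged sketch of how an equivalent layout is normally declared through the public client builders; the table name is hypothetical and only two of the four families are spelled out.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example:store"))       // hypothetical table name
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                // VERSIONS => '3'
                .setInMemory(true)                                // IN_MEMORY => 'true'
                .setBlocksize(8 * 1024)                           // BLOCKSIZE => '8192 B (8KB)'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("proc"))                // remaining attributes left at defaults
                .build())
            .build();
        System.out.println(td);
      }
    }

The attribute names in the dump (VERSIONS, IN_MEMORY, BLOCKSIZE, DATA_BLOCK_ENCODING, BLOOMFILTER) correspond to the builder setters used in the sketch.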
2024-12-08T00:48:04,221 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733618884220Disabling compacts and flushes for region at 1733618884220Disabling writes for close at 1733618884220Writing region close event to WAL at 1733618884220Closed at 1733618884220 2024-12-08T00:48:04,222 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/data/master/store/.initializing 2024-12-08T00:48:04,222 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/WALs/0f983e3e5be1,41649,1733618883926 2024-12-08T00:48:04,225 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C41649%2C1733618883926, suffix=, logDir=hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/WALs/0f983e3e5be1,41649,1733618883926, archiveDir=hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/oldWALs, maxLogs=10 2024-12-08T00:48:04,225 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C41649%2C1733618883926.1733618884225 2024-12-08T00:48:04,230 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/WALs/0f983e3e5be1,41649,1733618883926/0f983e3e5be1%2C41649%2C1733618883926.1733618884225 2024-12-08T00:48:04,231 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36355:36355),(127.0.0.1/127.0.0.1:35921:35921)] 2024-12-08T00:48:04,232 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:48:04,232 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:48:04,232 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:04,232 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:04,234 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:04,235 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T00:48:04,236 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:04,236 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:48:04,236 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:04,238 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T00:48:04,238 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:04,238 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:48:04,238 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:04,241 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T00:48:04,241 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:04,241 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:48:04,242 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:04,243 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T00:48:04,243 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:04,244 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:48:04,244 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:04,245 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:04,246 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:04,248 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:04,248 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:04,249 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T00:48:04,250 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:04,254 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:48:04,255 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=688320, jitterRate=-0.1247558742761612}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T00:48:04,257 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733618884233Initializing all the Stores at 1733618884234 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618884234Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618884234Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618884234Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618884234Cleaning up temporary data from old regions at 1733618884248 (+14 ms)Region opened successfully at 1733618884257 (+9 ms) 2024-12-08T00:48:04,258 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T00:48:04,264 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2aad27d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0f983e3e5be1/172.17.0.2:0 2024-12-08T00:48:04,265 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T00:48:04,265 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T00:48:04,265 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T00:48:04,265 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T00:48:04,266 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-08T00:48:04,267 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-08T00:48:04,267 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T00:48:04,270 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T00:48:04,271 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T00:48:04,304 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T00:48:04,305 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T00:48:04,306 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T00:48:04,313 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T00:48:04,313 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T00:48:04,315 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T00:48:04,321 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T00:48:04,323 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T00:48:04,329 DEBUG 
[master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T00:48:04,333 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T00:48:04,337 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T00:48:04,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T00:48:04,346 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46241-0x10002f29bdd0001, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T00:48:04,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:04,346 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46241-0x10002f29bdd0001, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:04,347 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0f983e3e5be1,41649,1733618883926, sessionid=0x10002f29bdd0000, setting cluster-up flag (Was=false) 2024-12-08T00:48:04,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:04,363 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46241-0x10002f29bdd0001, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:04,388 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T00:48:04,391 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0f983e3e5be1,41649,1733618883926 2024-12-08T00:48:04,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:04,413 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46241-0x10002f29bdd0001, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:04,438 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T00:48:04,441 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0f983e3e5be1,41649,1733618883926 2024-12-08T00:48:04,444 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T00:48:04,449 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T00:48:04,449 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T00:48:04,450 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-08T00:48:04,450 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0f983e3e5be1,41649,1733618883926 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T00:48:04,454 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:48:04,454 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:48:04,454 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:48:04,454 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:48:04,454 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0f983e3e5be1:0, corePoolSize=10, maxPoolSize=10 2024-12-08T00:48:04,454 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:04,454 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=2, maxPoolSize=2 2024-12-08T00:48:04,454 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=1, 
maxPoolSize=1 2024-12-08T00:48:04,455 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733618914455 2024-12-08T00:48:04,455 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T00:48:04,455 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T00:48:04,455 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T00:48:04,456 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T00:48:04,456 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T00:48:04,456 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T00:48:04,456 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:04,456 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T00:48:04,456 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T00:48:04,456 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:48:04,456 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T00:48:04,456 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-08T00:48:04,457 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T00:48:04,457 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T00:48:04,457 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733618884457,5,FailOnTimeoutGroup] 2024-12-08T00:48:04,458 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:04,458 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T00:48:04,458 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733618884457,5,FailOnTimeoutGroup] 2024-12-08T00:48:04,459 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:04,459 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T00:48:04,459 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:04,459 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
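[editor's note] For orientation only: the hbase:meta table descriptor dumped above can be reproduced with the public HBase client builder API. The following is a minimal sketch, not code from this test, showing the equivalent of the 'info' family with the same VERSIONS, BLOOMFILTER, IN_MEMORY, DATA_BLOCK_ENCODING and BLOCKSIZE values printed in the log; the table name "demo:meta_like" is made up for illustration.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MetaInfoFamilySketch {
      public static void main(String[] args) {
        // Mirrors the 'info' family settings printed by FSTableDescriptors above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setBlocksize(8192)                                   // BLOCKSIZE => '8192 B (8KB)'
            .build();

        TableDescriptor sketch = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo:meta_like"))      // hypothetical table name
            .setColumnFamily(info)
            .build();
        System.out.println(sketch);
      }
    }

The test itself never goes through this path (InitMetaProcedure writes the descriptor via FSTableDescriptors); the sketch only makes the attribute dump above easier to read.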
2024-12-08T00:48:04,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741831_1007 (size=1321) 2024-12-08T00:48:04,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44715 is added to blk_1073741831_1007 (size=1321) 2024-12-08T00:48:04,466 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T00:48:04,467 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0 2024-12-08T00:48:04,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741832_1008 (size=32) 2024-12-08T00:48:04,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44715 is added to blk_1073741832_1008 (size=32) 2024-12-08T00:48:04,473 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:48:04,475 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T00:48:04,476 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T00:48:04,476 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:04,477 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:48:04,477 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T00:48:04,478 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T00:48:04,479 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:04,479 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:48:04,479 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T00:48:04,481 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T00:48:04,481 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:04,481 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:48:04,482 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T00:48:04,483 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T00:48:04,483 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:04,484 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:48:04,484 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T00:48:04,485 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/data/hbase/meta/1588230740 2024-12-08T00:48:04,485 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/data/hbase/meta/1588230740 2024-12-08T00:48:04,487 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T00:48:04,487 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T00:48:04,487 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
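[editor's note] The CompactionConfiguration lines above echo ordinary hbase-site.xml settings. A minimal sketch, assuming the commonly documented key names (the values just mirror the minFilesToCompact / maxFilesToCompact / ratio figures printed in the log; nothing here is specific to this run):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class CompactionConfSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Values mirror the CompactionConfiguration dump above.
        conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // ratio 1.200000
        // The bound referenced by FlushLargeStoresPolicy above; the log falls back to
        // memstore-flush-size / #families because hbase:meta does not set it.
        System.out.println(conf.get("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
      }
    }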
2024-12-08T00:48:04,488 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T00:48:04,491 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:48:04,491 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=715373, jitterRate=-0.09035736322402954}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T00:48:04,492 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733618884474Initializing all the Stores at 1733618884474Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618884474Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618884475 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618884475Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618884475Cleaning up temporary data from old regions at 1733618884487 (+12 ms)Region opened successfully at 1733618884492 (+5 ms) 2024-12-08T00:48:04,492 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T00:48:04,492 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T00:48:04,492 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T00:48:04,493 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T00:48:04,493 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T00:48:04,493 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T00:48:04,493 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733618884492Disabling compacts and flushes for region at 1733618884492Disabling writes for close at 1733618884493 (+1 ms)Writing 
region close event to WAL at 1733618884493Closed at 1733618884493 2024-12-08T00:48:04,494 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:48:04,494 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T00:48:04,495 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T00:48:04,496 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T00:48:04,497 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T00:48:04,516 INFO [RS:0;0f983e3e5be1:46241 {}] regionserver.HRegionServer(746): ClusterId : 1d1b461b-1b33-41be-8927-4209f9e6a3ac 2024-12-08T00:48:04,517 DEBUG [RS:0;0f983e3e5be1:46241 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T00:48:04,546 DEBUG [RS:0;0f983e3e5be1:46241 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T00:48:04,546 DEBUG [RS:0;0f983e3e5be1:46241 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T00:48:04,555 DEBUG [RS:0;0f983e3e5be1:46241 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T00:48:04,556 DEBUG [RS:0;0f983e3e5be1:46241 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d5ed54c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0f983e3e5be1/172.17.0.2:0 2024-12-08T00:48:04,566 DEBUG [RS:0;0f983e3e5be1:46241 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0f983e3e5be1:46241 2024-12-08T00:48:04,566 INFO [RS:0;0f983e3e5be1:46241 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T00:48:04,566 INFO [RS:0;0f983e3e5be1:46241 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T00:48:04,566 DEBUG [RS:0;0f983e3e5be1:46241 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-08T00:48:04,567 INFO [RS:0;0f983e3e5be1:46241 {}] regionserver.HRegionServer(2659): reportForDuty to master=0f983e3e5be1,41649,1733618883926 with port=46241, startcode=1733618884086 2024-12-08T00:48:04,568 DEBUG [RS:0;0f983e3e5be1:46241 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T00:48:04,570 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41175, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T00:48:04,571 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41649 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0f983e3e5be1,46241,1733618884086 2024-12-08T00:48:04,571 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41649 {}] master.ServerManager(517): Registering regionserver=0f983e3e5be1,46241,1733618884086 2024-12-08T00:48:04,573 DEBUG [RS:0;0f983e3e5be1:46241 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0 2024-12-08T00:48:04,573 DEBUG [RS:0;0f983e3e5be1:46241 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41579 2024-12-08T00:48:04,573 DEBUG [RS:0;0f983e3e5be1:46241 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T00:48:04,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T00:48:04,580 DEBUG [RS:0;0f983e3e5be1:46241 {}] zookeeper.ZKUtil(111): regionserver:46241-0x10002f29bdd0001, quorum=127.0.0.1:56067, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0f983e3e5be1,46241,1733618884086 2024-12-08T00:48:04,580 WARN [RS:0;0f983e3e5be1:46241 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T00:48:04,580 INFO [RS:0;0f983e3e5be1:46241 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:48:04,580 DEBUG [RS:0;0f983e3e5be1:46241 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/WALs/0f983e3e5be1,46241,1733618884086 2024-12-08T00:48:04,581 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0f983e3e5be1,46241,1733618884086] 2024-12-08T00:48:04,585 INFO [RS:0;0f983e3e5be1:46241 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T00:48:04,587 INFO [RS:0;0f983e3e5be1:46241 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T00:48:04,588 INFO [RS:0;0f983e3e5be1:46241 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T00:48:04,588 INFO [RS:0;0f983e3e5be1:46241 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
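[editor's note] The ZKUtil/ZKWatcher lines above are HBase's wrapper around plain ZooKeeper watches: the ephemeral znode under /hbase/rs is how the master learns a regionserver is alive. A minimal sketch using the stock org.apache.zookeeper client against the quorum address printed in the log; this is not HBase's own ZKWatcher class, just the underlying mechanism.

    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public final class RsZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        Watcher watcher = (WatchedEvent event) -> {
          // Events arrive exactly as logged above: NodeCreated, NodeChildrenChanged, ...
          System.out.println("type=" + event.getType() + " path=" + event.getPath());
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        };
        ZooKeeper zk = new ZooKeeper("127.0.0.1:56067", 30_000, watcher);
        connected.await();
        // Register a children watch on /hbase/rs, the znode the master watches to see
        // regionserver ephemeral nodes come and go.
        List<String> servers = zk.getChildren("/hbase/rs", true);
        System.out.println("live regionservers: " + servers);
        zk.close();
      }
    }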
2024-12-08T00:48:04,588 INFO [RS:0;0f983e3e5be1:46241 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T00:48:04,589 INFO [RS:0;0f983e3e5be1:46241 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T00:48:04,589 INFO [RS:0;0f983e3e5be1:46241 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:04,589 DEBUG [RS:0;0f983e3e5be1:46241 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:04,589 DEBUG [RS:0;0f983e3e5be1:46241 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:04,589 DEBUG [RS:0;0f983e3e5be1:46241 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:04,589 DEBUG [RS:0;0f983e3e5be1:46241 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:04,589 DEBUG [RS:0;0f983e3e5be1:46241 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:04,590 DEBUG [RS:0;0f983e3e5be1:46241 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0f983e3e5be1:0, corePoolSize=2, maxPoolSize=2 2024-12-08T00:48:04,590 DEBUG [RS:0;0f983e3e5be1:46241 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:04,590 DEBUG [RS:0;0f983e3e5be1:46241 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:04,590 DEBUG [RS:0;0f983e3e5be1:46241 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:04,590 DEBUG [RS:0;0f983e3e5be1:46241 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:04,590 DEBUG [RS:0;0f983e3e5be1:46241 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:04,590 DEBUG [RS:0;0f983e3e5be1:46241 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:04,590 DEBUG [RS:0;0f983e3e5be1:46241 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:48:04,590 DEBUG [RS:0;0f983e3e5be1:46241 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:48:04,590 INFO [RS:0;0f983e3e5be1:46241 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
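[editor's note] The executor lines above only record pool sizing. The core/max terminology is the standard java.util.concurrent.ThreadPoolExecutor notion; HBase's executor.ExecutorService is its own wrapper and is not shown here. A throwaway illustration of what corePoolSize=1, maxPoolSize=1 means:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public final class PoolSizingSketch {
      public static void main(String[] args) {
        // corePoolSize=1, maxPoolSize=1, as in e.g. RS_OPEN_REGION above; with an
        // unbounded queue the pool never grows past the core size anyway.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        pool.execute(() -> System.out.println("open-region task placeholder"));
        pool.shutdown();
      }
    }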
2024-12-08T00:48:04,590 INFO [RS:0;0f983e3e5be1:46241 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:04,590 INFO [RS:0;0f983e3e5be1:46241 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:04,591 INFO [RS:0;0f983e3e5be1:46241 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:04,591 INFO [RS:0;0f983e3e5be1:46241 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:04,591 INFO [RS:0;0f983e3e5be1:46241 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,46241,1733618884086-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T00:48:04,603 INFO [RS:0;0f983e3e5be1:46241 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T00:48:04,603 INFO [RS:0;0f983e3e5be1:46241 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,46241,1733618884086-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:04,604 INFO [RS:0;0f983e3e5be1:46241 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:04,604 INFO [RS:0;0f983e3e5be1:46241 {}] regionserver.Replication(171): 0f983e3e5be1,46241,1733618884086 started 2024-12-08T00:48:04,615 INFO [RS:0;0f983e3e5be1:46241 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:04,615 INFO [RS:0;0f983e3e5be1:46241 {}] regionserver.HRegionServer(1482): Serving as 0f983e3e5be1,46241,1733618884086, RpcServer on 0f983e3e5be1/172.17.0.2:46241, sessionid=0x10002f29bdd0001 2024-12-08T00:48:04,615 DEBUG [RS:0;0f983e3e5be1:46241 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T00:48:04,615 DEBUG [RS:0;0f983e3e5be1:46241 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0f983e3e5be1,46241,1733618884086 2024-12-08T00:48:04,615 DEBUG [RS:0;0f983e3e5be1:46241 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,46241,1733618884086' 2024-12-08T00:48:04,615 DEBUG [RS:0;0f983e3e5be1:46241 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T00:48:04,616 DEBUG [RS:0;0f983e3e5be1:46241 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T00:48:04,617 DEBUG [RS:0;0f983e3e5be1:46241 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T00:48:04,617 DEBUG [RS:0;0f983e3e5be1:46241 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T00:48:04,617 DEBUG [RS:0;0f983e3e5be1:46241 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0f983e3e5be1,46241,1733618884086 2024-12-08T00:48:04,617 DEBUG [RS:0;0f983e3e5be1:46241 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,46241,1733618884086' 2024-12-08T00:48:04,617 DEBUG [RS:0;0f983e3e5be1:46241 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T00:48:04,617 DEBUG 
[RS:0;0f983e3e5be1:46241 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T00:48:04,618 DEBUG [RS:0;0f983e3e5be1:46241 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T00:48:04,618 INFO [RS:0;0f983e3e5be1:46241 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T00:48:04,618 INFO [RS:0;0f983e3e5be1:46241 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T00:48:04,648 WARN [0f983e3e5be1:41649 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-08T00:48:04,720 INFO [RS:0;0f983e3e5be1:46241 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C46241%2C1733618884086, suffix=, logDir=hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/WALs/0f983e3e5be1,46241,1733618884086, archiveDir=hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/oldWALs, maxLogs=32 2024-12-08T00:48:04,722 INFO [RS:0;0f983e3e5be1:46241 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C46241%2C1733618884086.1733618884722 2024-12-08T00:48:04,729 INFO [RS:0;0f983e3e5be1:46241 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/WALs/0f983e3e5be1,46241,1733618884086/0f983e3e5be1%2C46241%2C1733618884086.1733618884722 2024-12-08T00:48:04,731 DEBUG [RS:0;0f983e3e5be1:46241 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35921:35921),(127.0.0.1/127.0.0.1:36355:36355)] 2024-12-08T00:48:04,898 DEBUG [0f983e3e5be1:41649 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-08T00:48:04,899 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0f983e3e5be1,46241,1733618884086 2024-12-08T00:48:04,902 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0f983e3e5be1,46241,1733618884086, state=OPENING 2024-12-08T00:48:04,913 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T00:48:04,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:04,921 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46241-0x10002f29bdd0001, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:04,923 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T00:48:04,923 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:48:04,923 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:48:04,923 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0f983e3e5be1,46241,1733618884086}] 2024-12-08T00:48:05,077 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T00:48:05,084 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60229, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T00:48:05,093 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-08T00:48:05,093 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:48:05,096 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C46241%2C1733618884086.meta, suffix=.meta, logDir=hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/WALs/0f983e3e5be1,46241,1733618884086, archiveDir=hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/oldWALs, maxLogs=32 2024-12-08T00:48:05,098 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C46241%2C1733618884086.meta.1733618885098.meta 2024-12-08T00:48:05,105 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/WALs/0f983e3e5be1,46241,1733618884086/0f983e3e5be1%2C46241%2C1733618884086.meta.1733618885098.meta 2024-12-08T00:48:05,106 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35921:35921),(127.0.0.1/127.0.0.1:36355:36355)] 2024-12-08T00:48:05,107 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:48:05,108 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T00:48:05,108 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T00:48:05,108 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
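[editor's note] The AbstractFSWAL lines above report blocksize=256 MB, rollsize=128 MB and maxLogs=32 for the new WAL. A minimal sketch of setting the corresponding hbase-site.xml values; the key names are the commonly documented ones and are my assumption, not something this log prints.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class WalConfSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key names; values mirror the "WAL configuration" line above.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f); // rollsize = blocksize * multiplier
        conf.setInt("hbase.regionserver.maxlogs", 32);
        System.out.println(conf.get("hbase.regionserver.maxlogs"));
      }
    }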
2024-12-08T00:48:05,108 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T00:48:05,108 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:48:05,108 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-08T00:48:05,108 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-08T00:48:05,111 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T00:48:05,112 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T00:48:05,112 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:05,113 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:48:05,114 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T00:48:05,115 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T00:48:05,116 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:05,116 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:48:05,116 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T00:48:05,118 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T00:48:05,118 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:05,118 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:48:05,119 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T00:48:05,120 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T00:48:05,120 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:05,120 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
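[editor's note] The repeated "Created cacheConfig" lines above (cacheDataOnRead=true, cacheDataOnWrite=false, prefetchOnOpen=false, ...) are per-family block cache switches. A minimal sketch of the equivalent flags on a hypothetical family via the schema builder; the setters are, to the best of my reading, the standard ColumnFamilyDescriptorBuilder methods, and the family name "f" is made up.

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CacheFlagsSketch {
      public static void main(String[] args) {
        // Hypothetical family; flags correspond to the cacheConfig fields logged above.
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("f"))
            .setBlockCacheEnabled(true)      // cacheDataOnRead=true
            .setCacheDataOnWrite(false)      // cacheDataOnWrite=false
            .setPrefetchBlocksOnOpen(false)  // prefetchOnOpen=false
            .setEvictBlocksOnClose(false)    // cacheEvictOnClose=false
            .build();
        System.out.println(cf);
      }
    }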
2024-12-08T00:48:05,121 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T00:48:05,122 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/data/hbase/meta/1588230740 2024-12-08T00:48:05,123 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/data/hbase/meta/1588230740 2024-12-08T00:48:05,125 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T00:48:05,125 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T00:48:05,126 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T00:48:05,128 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T00:48:05,129 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=698329, jitterRate=-0.11202897131443024}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T00:48:05,129 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-08T00:48:05,130 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733618885109Writing region info on filesystem at 1733618885109Initializing all the Stores at 1733618885110 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618885110Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618885110Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618885110Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618885110Cleaning up temporary data from old regions at 1733618885125 (+15 ms)Running coprocessor post-open hooks at 1733618885129 (+4 ms)Region opened successfully at 1733618885130 (+1 ms) 2024-12-08T00:48:05,131 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733618885077 2024-12-08T00:48:05,134 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T00:48:05,134 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-08T00:48:05,135 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0f983e3e5be1,46241,1733618884086 2024-12-08T00:48:05,136 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0f983e3e5be1,46241,1733618884086, state=OPEN 2024-12-08T00:48:05,184 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46241-0x10002f29bdd0001, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T00:48:05,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T00:48:05,184 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0f983e3e5be1,46241,1733618884086 2024-12-08T00:48:05,184 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:48:05,185 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:48:05,191 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T00:48:05,191 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0f983e3e5be1,46241,1733618884086 in 262 msec 2024-12-08T00:48:05,197 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T00:48:05,198 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 696 msec 2024-12-08T00:48:05,199 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:48:05,199 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-08T00:48:05,201 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T00:48:05,201 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0f983e3e5be1,46241,1733618884086, seqNum=-1] 2024-12-08T00:48:05,202 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:48:05,204 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34727, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:48:05,213 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 765 msec 2024-12-08T00:48:05,213 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733618885213, completionTime=-1 2024-12-08T00:48:05,213 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-08T00:48:05,213 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-08T00:48:05,215 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-08T00:48:05,215 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733618945215 2024-12-08T00:48:05,215 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733619005215 2024-12-08T00:48:05,215 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-08T00:48:05,215 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,41649,1733618883926-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:05,215 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,41649,1733618883926-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:05,215 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,41649,1733618883926-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:05,215 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0f983e3e5be1:41649, period=300000, unit=MILLISECONDS is enabled. 
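[editor's note] The ConnectionUtils lines above ("Start fetching meta region location from registry" / "The fetched meta region location is ...") are the client side of meta assignment. A minimal sketch of resolving the same location from test code with the public client API, assuming the quorum, port and baseZNode values printed earlier in this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MetaLocationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");           // quorum host from the log
        conf.set("hbase.zookeeper.property.clientPort", "56067");  // quorum port from the log
        conf.set("zookeeper.znode.parent", "/hbase");              // baseZNode from the log
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Resolves the hbase:meta location the PEWorker fetched above.
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""));
          System.out.println(loc);
        }
      }
    }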
2024-12-08T00:48:05,215 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:05,216 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:05,217 DEBUG [master/0f983e3e5be1:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T00:48:05,220 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.065sec 2024-12-08T00:48:05,220 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T00:48:05,220 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T00:48:05,220 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T00:48:05,220 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-08T00:48:05,220 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T00:48:05,220 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,41649,1733618883926-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T00:48:05,221 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,41649,1733618883926-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T00:48:05,223 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T00:48:05,223 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T00:48:05,223 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,41649,1733618883926-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-08T00:48:05,318 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39bdb4ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:48:05,318 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0f983e3e5be1,41649,-1 for getting cluster id 2024-12-08T00:48:05,319 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T00:48:05,323 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '1d1b461b-1b33-41be-8927-4209f9e6a3ac' 2024-12-08T00:48:05,324 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T00:48:05,324 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "1d1b461b-1b33-41be-8927-4209f9e6a3ac" 2024-12-08T00:48:05,325 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71fba55e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:48:05,325 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0f983e3e5be1,41649,-1] 2024-12-08T00:48:05,325 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T00:48:05,326 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:48:05,327 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42692, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T00:48:05,328 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a676f27, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:48:05,329 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T00:48:05,330 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0f983e3e5be1,46241,1733618884086, seqNum=-1] 2024-12-08T00:48:05,330 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:48:05,332 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47962, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:48:05,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0f983e3e5be1,41649,1733618883926 2024-12-08T00:48:05,334 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:48:05,337 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-08T00:48:05,337 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T00:48:05,337 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T00:48:05,337 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:48:05,338 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:48:05,338 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:48:05,338 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-08T00:48:05,338 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T00:48:05,338 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1434811321, stopped=false 2024-12-08T00:48:05,338 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0f983e3e5be1,41649,1733618883926 2024-12-08T00:48:05,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T00:48:05,354 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46241-0x10002f29bdd0001, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T00:48:05,354 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T00:48:05,354 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46241-0x10002f29bdd0001, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:05,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:05,355 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-08T00:48:05,355 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:48:05,355 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:48:05,355 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46241-0x10002f29bdd0001, quorum=127.0.0.1:56067, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:48:05,355 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:48:05,355 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0f983e3e5be1,46241,1733618884086' ***** 2024-12-08T00:48:05,355 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T00:48:05,355 INFO [RS:0;0f983e3e5be1:46241 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T00:48:05,356 INFO [RS:0;0f983e3e5be1:46241 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T00:48:05,356 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T00:48:05,356 INFO [RS:0;0f983e3e5be1:46241 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T00:48:05,356 INFO [RS:0;0f983e3e5be1:46241 {}] regionserver.HRegionServer(959): stopping server 0f983e3e5be1,46241,1733618884086 2024-12-08T00:48:05,356 INFO [RS:0;0f983e3e5be1:46241 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T00:48:05,356 INFO [RS:0;0f983e3e5be1:46241 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0f983e3e5be1:46241. 2024-12-08T00:48:05,356 DEBUG [RS:0;0f983e3e5be1:46241 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:48:05,356 DEBUG [RS:0;0f983e3e5be1:46241 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:48:05,356 INFO [RS:0;0f983e3e5be1:46241 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-08T00:48:05,356 INFO [RS:0;0f983e3e5be1:46241 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T00:48:05,356 INFO [RS:0;0f983e3e5be1:46241 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T00:48:05,356 INFO [RS:0;0f983e3e5be1:46241 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T00:48:05,356 INFO [RS:0;0f983e3e5be1:46241 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-08T00:48:05,356 DEBUG [RS:0;0f983e3e5be1:46241 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-08T00:48:05,357 DEBUG [RS:0;0f983e3e5be1:46241 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-08T00:48:05,357 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T00:48:05,357 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T00:48:05,357 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T00:48:05,357 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T00:48:05,357 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T00:48:05,357 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-08T00:48:05,372 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/data/hbase/meta/1588230740/.tmp/ns/110bf65589534183883f69b048e9ee53 is 43, key is default/ns:d/1733618885204/Put/seqid=0 2024-12-08T00:48:05,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741835_1011 (size=5153) 2024-12-08T00:48:05,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44715 is added to blk_1073741835_1011 (size=5153) 2024-12-08T00:48:05,379 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/data/hbase/meta/1588230740/.tmp/ns/110bf65589534183883f69b048e9ee53 2024-12-08T00:48:05,387 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/data/hbase/meta/1588230740/.tmp/ns/110bf65589534183883f69b048e9ee53 as hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/data/hbase/meta/1588230740/ns/110bf65589534183883f69b048e9ee53 2024-12-08T00:48:05,395 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/data/hbase/meta/1588230740/ns/110bf65589534183883f69b048e9ee53, entries=2, sequenceid=6, filesize=5.0 K 2024-12-08T00:48:05,397 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 40ms, sequenceid=6, compaction requested=false 2024-12-08T00:48:05,397 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-08T00:48:05,402 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-08T00:48:05,403 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T00:48:05,403 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T00:48:05,403 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733618885356Running coprocessor pre-close hooks at 1733618885356Disabling compacts and flushes for region at 1733618885356Disabling writes for close at 1733618885357 (+1 ms)Obtaining lock to block concurrent updates at 1733618885357Preparing flush snapshotting stores in 1588230740 at 1733618885357Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733618885357Flushing stores of hbase:meta,,1.1588230740 at 1733618885358 (+1 ms)Flushing 1588230740/ns: creating writer at 1733618885358Flushing 1588230740/ns: appending metadata at 1733618885371 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1733618885371Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5b6ac1dd: reopening flushed file at 1733618885386 (+15 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 40ms, sequenceid=6, compaction requested=false at 1733618885397 (+11 ms)Writing region close event to WAL at 1733618885398 (+1 ms)Running coprocessor post-close hooks at 1733618885403 (+5 ms)Closed at 1733618885403 2024-12-08T00:48:05,404 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T00:48:05,557 INFO [RS:0;0f983e3e5be1:46241 {}] regionserver.HRegionServer(976): stopping server 0f983e3e5be1,46241,1733618884086; all regions closed. 
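[Editor's note, illustrative only] The stack traces above show AbstractTestLogRolling.tearDown(...) shutting the cluster down through HBaseTestingUtil.shutdownMiniCluster(), and the restart entry later in this log prints the StartMiniClusterOption the cluster is brought up with (1 master, 1 region server, 2 datanodes, 1 ZK server). As a rough sketch of that surrounding JUnit lifecycle — not taken from the actual test source; only the class, method, and option names visible in this log are assumed to be real, the rest is hypothetical:

```java
// Hedged sketch of the minicluster lifecycle that produces the start/stop
// sequences recorded in this log. Not the real TestLogRolling code.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.junit.After;
import org.junit.Before;

public class LogRollingLifecycleSketch {
  // Shared test utility, as named in the stack traces above.
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Mirrors the StartMiniClusterOption values printed when the cluster
    // is (re)started in this log; the builder calls are assumptions.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    testUtil.startMiniCluster(option);
  }

  @After
  public void tearDown() throws Exception {
    // Triggers the "Shutting down minicluster" sequence seen above:
    // close async connections, stop the region server and master,
    // then stop the DFS datanodes and the mini ZK cluster.
    testUtil.shutdownMiniCluster();
  }
}
```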
2024-12-08T00:48:05,558 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:05,559 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:05,559 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:05,559 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:05,560 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:05,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741834_1010 (size=1152) 2024-12-08T00:48:05,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44715 is added to blk_1073741834_1010 (size=1152) 2024-12-08T00:48:05,570 DEBUG [RS:0;0f983e3e5be1:46241 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/oldWALs 2024-12-08T00:48:05,570 INFO [RS:0;0f983e3e5be1:46241 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0f983e3e5be1%2C46241%2C1733618884086.meta:.meta(num 1733618885098) 2024-12-08T00:48:05,570 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:05,570 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:05,570 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:05,571 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:05,571 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:05,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741833_1009 (size=93) 2024-12-08T00:48:05,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44715 is added to blk_1073741833_1009 (size=93) 2024-12-08T00:48:05,630 INFO [regionserver/0f983e3e5be1:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-08T00:48:05,631 INFO [regionserver/0f983e3e5be1:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-08T00:48:05,912 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T00:48:05,912 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-08T00:48:05,915 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-08T00:48:05,978 DEBUG [RS:0;0f983e3e5be1:46241 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/oldWALs 2024-12-08T00:48:05,978 INFO [RS:0;0f983e3e5be1:46241 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0f983e3e5be1%2C46241%2C1733618884086:(num 1733618884722) 2024-12-08T00:48:05,979 DEBUG [RS:0;0f983e3e5be1:46241 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:48:05,979 INFO [RS:0;0f983e3e5be1:46241 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:48:05,979 INFO [RS:0;0f983e3e5be1:46241 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T00:48:05,979 INFO [RS:0;0f983e3e5be1:46241 {}] hbase.ChoreService(370): Chore service for: 
regionserver/0f983e3e5be1:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T00:48:05,979 INFO [RS:0;0f983e3e5be1:46241 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T00:48:05,980 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T00:48:05,980 INFO [RS:0;0f983e3e5be1:46241 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46241 2024-12-08T00:48:06,038 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46241-0x10002f29bdd0001, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0f983e3e5be1,46241,1733618884086 2024-12-08T00:48:06,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T00:48:06,038 INFO [RS:0;0f983e3e5be1:46241 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T00:48:06,046 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0f983e3e5be1,46241,1733618884086] 2024-12-08T00:48:06,054 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0f983e3e5be1,46241,1733618884086 already deleted, retry=false 2024-12-08T00:48:06,054 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0f983e3e5be1,46241,1733618884086 expired; onlineServers=0 2024-12-08T00:48:06,054 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0f983e3e5be1,41649,1733618883926' ***** 2024-12-08T00:48:06,054 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T00:48:06,054 INFO [M:0;0f983e3e5be1:41649 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T00:48:06,054 INFO [M:0;0f983e3e5be1:41649 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T00:48:06,054 DEBUG [M:0;0f983e3e5be1:41649 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T00:48:06,054 DEBUG [M:0;0f983e3e5be1:41649 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T00:48:06,054 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-08T00:48:06,054 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733618884457 {}] cleaner.HFileCleaner(306): Exit Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733618884457,5,FailOnTimeoutGroup] 2024-12-08T00:48:06,054 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733618884457 {}] cleaner.HFileCleaner(306): Exit Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733618884457,5,FailOnTimeoutGroup] 2024-12-08T00:48:06,055 INFO [M:0;0f983e3e5be1:41649 {}] hbase.ChoreService(370): Chore service for: master/0f983e3e5be1:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T00:48:06,055 INFO [M:0;0f983e3e5be1:41649 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T00:48:06,055 DEBUG [M:0;0f983e3e5be1:41649 {}] master.HMaster(1795): Stopping service threads 2024-12-08T00:48:06,055 INFO [M:0;0f983e3e5be1:41649 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T00:48:06,055 INFO [M:0;0f983e3e5be1:41649 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T00:48:06,055 INFO [M:0;0f983e3e5be1:41649 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T00:48:06,055 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T00:48:06,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T00:48:06,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:06,063 DEBUG [M:0;0f983e3e5be1:41649 {}] zookeeper.ZKUtil(347): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T00:48:06,063 WARN [M:0;0f983e3e5be1:41649 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T00:48:06,064 INFO [M:0;0f983e3e5be1:41649 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/.lastflushedseqids 2024-12-08T00:48:06,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44715 is added to blk_1073741836_1012 (size=99) 2024-12-08T00:48:06,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741836_1012 (size=99) 2024-12-08T00:48:06,074 INFO [M:0;0f983e3e5be1:41649 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T00:48:06,074 INFO [M:0;0f983e3e5be1:41649 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T00:48:06,074 DEBUG [M:0;0f983e3e5be1:41649 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T00:48:06,074 INFO [M:0;0f983e3e5be1:41649 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:48:06,075 DEBUG [M:0;0f983e3e5be1:41649 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:48:06,075 DEBUG [M:0;0f983e3e5be1:41649 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T00:48:06,075 DEBUG [M:0;0f983e3e5be1:41649 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:48:06,075 INFO [M:0;0f983e3e5be1:41649 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-08T00:48:06,093 DEBUG [M:0;0f983e3e5be1:41649 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b94f509d8b6941e5a31050a2ddb4abec is 82, key is hbase:meta,,1/info:regioninfo/1733618885135/Put/seqid=0 2024-12-08T00:48:06,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44715 is added to blk_1073741837_1013 (size=5672) 2024-12-08T00:48:06,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741837_1013 (size=5672) 2024-12-08T00:48:06,099 INFO [M:0;0f983e3e5be1:41649 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b94f509d8b6941e5a31050a2ddb4abec 2024-12-08T00:48:06,117 DEBUG [M:0;0f983e3e5be1:41649 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1643588048c2449987414007782b999b is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733618885212/Put/seqid=0 2024-12-08T00:48:06,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741838_1014 (size=5275) 2024-12-08T00:48:06,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44715 is added to blk_1073741838_1014 (size=5275) 2024-12-08T00:48:06,123 INFO [M:0;0f983e3e5be1:41649 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1643588048c2449987414007782b999b 2024-12-08T00:48:06,141 DEBUG [M:0;0f983e3e5be1:41649 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/23341ca41f8a43e2ac95eb7a33686998 is 69, key is 0f983e3e5be1,46241,1733618884086/rs:state/1733618884571/Put/seqid=0 2024-12-08T00:48:06,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44715 is added to blk_1073741839_1015 (size=5156) 2024-12-08T00:48:06,146 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741839_1015 (size=5156) 2024-12-08T00:48:06,146 INFO [M:0;0f983e3e5be1:41649 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/23341ca41f8a43e2ac95eb7a33686998 2024-12-08T00:48:06,146 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46241-0x10002f29bdd0001, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:48:06,146 INFO [RS:0;0f983e3e5be1:46241 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T00:48:06,146 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46241-0x10002f29bdd0001, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:48:06,146 INFO [RS:0;0f983e3e5be1:46241 {}] regionserver.HRegionServer(1031): Exiting; stopping=0f983e3e5be1,46241,1733618884086; zookeeper connection closed. 2024-12-08T00:48:06,147 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6bd16a61 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6bd16a61 2024-12-08T00:48:06,147 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-08T00:48:06,165 DEBUG [M:0;0f983e3e5be1:41649 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/61586c6db6e244d2bfd25ab4dc50ffaf is 52, key is load_balancer_on/state:d/1733618885336/Put/seqid=0 2024-12-08T00:48:06,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741840_1016 (size=5056) 2024-12-08T00:48:06,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44715 is added to blk_1073741840_1016 (size=5056) 2024-12-08T00:48:06,171 INFO [M:0;0f983e3e5be1:41649 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/61586c6db6e244d2bfd25ab4dc50ffaf 2024-12-08T00:48:06,177 DEBUG [M:0;0f983e3e5be1:41649 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b94f509d8b6941e5a31050a2ddb4abec as hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b94f509d8b6941e5a31050a2ddb4abec 2024-12-08T00:48:06,183 INFO [M:0;0f983e3e5be1:41649 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b94f509d8b6941e5a31050a2ddb4abec, entries=8, sequenceid=29, filesize=5.5 K 2024-12-08T00:48:06,185 DEBUG [M:0;0f983e3e5be1:41649 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1643588048c2449987414007782b999b as hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1643588048c2449987414007782b999b 2024-12-08T00:48:06,191 INFO [M:0;0f983e3e5be1:41649 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1643588048c2449987414007782b999b, entries=3, sequenceid=29, filesize=5.2 K 2024-12-08T00:48:06,193 DEBUG [M:0;0f983e3e5be1:41649 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/23341ca41f8a43e2ac95eb7a33686998 as hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/23341ca41f8a43e2ac95eb7a33686998 2024-12-08T00:48:06,200 INFO [M:0;0f983e3e5be1:41649 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/23341ca41f8a43e2ac95eb7a33686998, entries=1, sequenceid=29, filesize=5.0 K 2024-12-08T00:48:06,201 DEBUG [M:0;0f983e3e5be1:41649 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/61586c6db6e244d2bfd25ab4dc50ffaf as hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/61586c6db6e244d2bfd25ab4dc50ffaf 2024-12-08T00:48:06,208 INFO [M:0;0f983e3e5be1:41649 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41579/user/jenkins/test-data/46db3ca7-1f5f-cc72-ddba-4f78c39fc9f0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/61586c6db6e244d2bfd25ab4dc50ffaf, entries=1, sequenceid=29, filesize=4.9 K 2024-12-08T00:48:06,209 INFO [M:0;0f983e3e5be1:41649 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 134ms, sequenceid=29, compaction requested=false 2024-12-08T00:48:06,211 INFO [M:0;0f983e3e5be1:41649 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:48:06,211 DEBUG [M:0;0f983e3e5be1:41649 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733618886074Disabling compacts and flushes for region at 1733618886074Disabling writes for close at 1733618886075 (+1 ms)Obtaining lock to block concurrent updates at 1733618886075Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733618886075Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733618886076 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733618886076Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733618886077 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733618886093 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733618886093Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733618886104 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733618886117 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733618886117Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733618886128 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733618886140 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733618886140Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733618886151 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733618886165 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733618886165Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@58553b08: reopening flushed file at 1733618886176 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34a70208: reopening flushed file at 1733618886183 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@23a28232: reopening flushed file at 1733618886191 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d01356f: reopening flushed file at 1733618886200 (+9 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 134ms, sequenceid=29, compaction requested=false at 1733618886209 (+9 ms)Writing region close event to WAL at 1733618886211 (+2 ms)Closed at 1733618886211 2024-12-08T00:48:06,211 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:06,212 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:06,212 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:06,212 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:06,212 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:06,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44715 is added to blk_1073741830_1006 (size=10311) 2024-12-08T00:48:06,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741830_1006 (size=10311) 2024-12-08T00:48:06,215 INFO [M:0;0f983e3e5be1:41649 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-08T00:48:06,215 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T00:48:06,215 INFO [M:0;0f983e3e5be1:41649 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41649 2024-12-08T00:48:06,215 INFO [M:0;0f983e3e5be1:41649 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T00:48:06,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:48:06,338 INFO [M:0;0f983e3e5be1:41649 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T00:48:06,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41649-0x10002f29bdd0000, quorum=127.0.0.1:56067, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:48:06,344 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5a8d969a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:48:06,345 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3bfa6a9b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:48:06,346 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:48:06,346 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3509b1e6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:48:06,347 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5c6f09a0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/hadoop.log.dir/,STOPPED} 2024-12-08T00:48:06,349 WARN [BP-1919488677-172.17.0.2-1733618882182 heartbeating to localhost/127.0.0.1:41579 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:48:06,349 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T00:48:06,349 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:48:06,349 WARN [BP-1919488677-172.17.0.2-1733618882182 heartbeating to localhost/127.0.0.1:41579 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1919488677-172.17.0.2-1733618882182 (Datanode Uuid e65f2eeb-09db-4e48-a20d-c2a370227aaf) service to localhost/127.0.0.1:41579 2024-12-08T00:48:06,350 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/cluster_6af9d43c-ffe1-1a4c-1e8f-0a2feb81a439/data/data3/current/BP-1919488677-172.17.0.2-1733618882182 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:48:06,350 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/cluster_6af9d43c-ffe1-1a4c-1e8f-0a2feb81a439/data/data4/current/BP-1919488677-172.17.0.2-1733618882182 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:48:06,351 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:48:06,353 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6aa2a76b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:48:06,353 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7ba10be8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:48:06,353 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:48:06,353 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@72a59bd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:48:06,353 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d31b0cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/hadoop.log.dir/,STOPPED} 2024-12-08T00:48:06,354 WARN [BP-1919488677-172.17.0.2-1733618882182 heartbeating to localhost/127.0.0.1:41579 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:48:06,354 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T00:48:06,354 WARN [BP-1919488677-172.17.0.2-1733618882182 heartbeating to localhost/127.0.0.1:41579 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1919488677-172.17.0.2-1733618882182 (Datanode Uuid 3e11dc30-f146-4b76-b43d-6b37a4dffcb8) service to localhost/127.0.0.1:41579 2024-12-08T00:48:06,354 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:48:06,355 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/cluster_6af9d43c-ffe1-1a4c-1e8f-0a2feb81a439/data/data1/current/BP-1919488677-172.17.0.2-1733618882182 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:48:06,355 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/cluster_6af9d43c-ffe1-1a4c-1e8f-0a2feb81a439/data/data2/current/BP-1919488677-172.17.0.2-1733618882182 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:48:06,355 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:48:06,360 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f407acd{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T00:48:06,361 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@685a66bc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:48:06,361 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:48:06,361 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2fbdac8b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:48:06,361 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383a5779{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/hadoop.log.dir/,STOPPED} 2024-12-08T00:48:06,368 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T00:48:06,393 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T00:48:06,393 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T00:48:06,394 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/hadoop.log.dir so I do NOT create it in target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3 2024-12-08T00:48:06,394 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b10cb6e4-f80e-6e45-a74f-d464d7f5cb43/hadoop.tmp.dir so I do NOT create it in target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3 2024-12-08T00:48:06,394 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c, deleteOnExit=true 2024-12-08T00:48:06,394 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-08T00:48:06,394 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/test.cache.data in system properties and HBase conf 2024-12-08T00:48:06,394 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T00:48:06,394 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/hadoop.log.dir in system properties and HBase conf 2024-12-08T00:48:06,394 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T00:48:06,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T00:48:06,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T00:48:06,395 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-08T00:48:06,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T00:48:06,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T00:48:06,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T00:48:06,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T00:48:06,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T00:48:06,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T00:48:06,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T00:48:06,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T00:48:06,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T00:48:06,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/nfs.dump.dir in system properties and HBase conf 2024-12-08T00:48:06,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/java.io.tmpdir in system properties and HBase conf 2024-12-08T00:48:06,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T00:48:06,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T00:48:06,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T00:48:06,407 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T00:48:06,465 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:48:06,476 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:48:06,591 INFO [regionserver/0f983e3e5be1:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:48:06,624 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-08T00:48:06,626 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:48:06,637 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:48:06,638 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:48:06,639 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:48:06,657 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:48:06,662 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:48:06,663 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:48:06,663 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:48:06,663 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T00:48:06,665 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:48:06,666 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@151bb937{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:48:06,666 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10e6067e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:48:06,756 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2aec9805{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/java.io.tmpdir/jetty-localhost-36937-hadoop-hdfs-3_4_1-tests_jar-_-any-4222545274920314470/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T00:48:06,756 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7cd8d56a{HTTP/1.1, (http/1.1)}{localhost:36937} 2024-12-08T00:48:06,756 INFO [Time-limited test {}] server.Server(415): Started @106186ms 2024-12-08T00:48:06,767 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T00:48:06,974 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:48:06,977 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:48:06,978 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:48:06,978 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:48:06,978 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T00:48:06,978 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@227fed5b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:48:06,979 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bca9002{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:48:07,069 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5be1ef63{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/java.io.tmpdir/jetty-localhost-38165-hadoop-hdfs-3_4_1-tests_jar-_-any-5664279106546835536/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:48:07,070 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@16c96639{HTTP/1.1, (http/1.1)}{localhost:38165} 2024-12-08T00:48:07,070 INFO [Time-limited test {}] server.Server(415): Started @106500ms 2024-12-08T00:48:07,072 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T00:48:07,096 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:48:07,099 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:48:07,100 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:48:07,100 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:48:07,100 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T00:48:07,101 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@474543c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:48:07,101 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2082a925{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:48:07,189 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@331aadeb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/java.io.tmpdir/jetty-localhost-33363-hadoop-hdfs-3_4_1-tests_jar-_-any-192532103078046984/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:48:07,189 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a7c2e3d{HTTP/1.1, (http/1.1)}{localhost:33363} 2024-12-08T00:48:07,190 INFO [Time-limited test {}] server.Server(415): Started @106619ms 2024-12-08T00:48:07,191 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T00:48:07,884 WARN [Thread-673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data1/current/BP-791727382-172.17.0.2-1733618886417/current, will proceed with Du for space computation calculation, 2024-12-08T00:48:07,884 WARN [Thread-674 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data2/current/BP-791727382-172.17.0.2-1733618886417/current, will proceed with Du for space computation calculation, 2024-12-08T00:48:07,906 WARN [Thread-637 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T00:48:07,909 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd9650ef8cf24c7c7 with lease ID 0x3478e23613d90b46: Processing first storage report for DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8 from datanode DatanodeRegistration(127.0.0.1:45779, datanodeUuid=e5a9bc8e-62a5-43a3-880f-00777e4e84dd, infoPort=34497, infoSecurePort=0, ipcPort=42159, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417) 2024-12-08T00:48:07,909 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd9650ef8cf24c7c7 with lease ID 0x3478e23613d90b46: from storage DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8 node DatanodeRegistration(127.0.0.1:45779, datanodeUuid=e5a9bc8e-62a5-43a3-880f-00777e4e84dd, infoPort=34497, infoSecurePort=0, ipcPort=42159, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T00:48:07,909 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd9650ef8cf24c7c7 with lease ID 0x3478e23613d90b46: Processing first storage report for DS-a56211c9-74a1-4ee7-b4d9-a8a48522dabf from datanode DatanodeRegistration(127.0.0.1:45779, datanodeUuid=e5a9bc8e-62a5-43a3-880f-00777e4e84dd, infoPort=34497, infoSecurePort=0, ipcPort=42159, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417) 2024-12-08T00:48:07,909 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd9650ef8cf24c7c7 with lease ID 0x3478e23613d90b46: from storage DS-a56211c9-74a1-4ee7-b4d9-a8a48522dabf node DatanodeRegistration(127.0.0.1:45779, datanodeUuid=e5a9bc8e-62a5-43a3-880f-00777e4e84dd, infoPort=34497, infoSecurePort=0, ipcPort=42159, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:48:08,002 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data3/current/BP-791727382-172.17.0.2-1733618886417/current, will proceed with Du for space computation calculation, 2024-12-08T00:48:08,002 WARN [Thread-685 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data4/current/BP-791727382-172.17.0.2-1733618886417/current, will proceed with Du for space computation calculation, 2024-12-08T00:48:08,019 WARN [Thread-660 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T00:48:08,021 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8f7d89d6518006e1 with lease ID 0x3478e23613d90b47: Processing first storage report for DS-796f6687-3a76-48ad-a50d-1d0026d41077 from datanode DatanodeRegistration(127.0.0.1:37879, datanodeUuid=94032d42-a92f-496f-a54d-179c92434f44, infoPort=43141, infoSecurePort=0, ipcPort=35775, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417) 2024-12-08T00:48:08,021 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8f7d89d6518006e1 with lease ID 0x3478e23613d90b47: from storage DS-796f6687-3a76-48ad-a50d-1d0026d41077 node DatanodeRegistration(127.0.0.1:37879, datanodeUuid=94032d42-a92f-496f-a54d-179c92434f44, infoPort=43141, infoSecurePort=0, ipcPort=35775, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:48:08,021 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8f7d89d6518006e1 with lease ID 0x3478e23613d90b47: Processing first storage report for DS-9659f3c7-076f-4572-a003-e7d63b0b73ee from datanode DatanodeRegistration(127.0.0.1:37879, datanodeUuid=94032d42-a92f-496f-a54d-179c92434f44, infoPort=43141, infoSecurePort=0, ipcPort=35775, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417) 2024-12-08T00:48:08,021 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8f7d89d6518006e1 with lease ID 0x3478e23613d90b47: from storage DS-9659f3c7-076f-4572-a003-e7d63b0b73ee node DatanodeRegistration(127.0.0.1:37879, datanodeUuid=94032d42-a92f-496f-a54d-179c92434f44, infoPort=43141, infoSecurePort=0, ipcPort=35775, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:48:08,123 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3 2024-12-08T00:48:08,126 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/zookeeper_0, clientPort=53241, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T00:48:08,127 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53241 2024-12-08T00:48:08,128 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:48:08,130 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:48:08,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45779 is added to blk_1073741825_1001 (size=7) 2024-12-08T00:48:08,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741825_1001 (size=7) 2024-12-08T00:48:08,141 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956 with version=8 2024-12-08T00:48:08,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/hbase-staging 2024-12-08T00:48:08,143 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0f983e3e5be1:0 server-side Connection retries=45 2024-12-08T00:48:08,143 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:48:08,143 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T00:48:08,143 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T00:48:08,143 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:48:08,143 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T00:48:08,143 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T00:48:08,143 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T00:48:08,144 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41445 2024-12-08T00:48:08,145 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41445 connecting to ZooKeeper ensemble=127.0.0.1:53241 2024-12-08T00:48:08,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:414450x0, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T00:48:08,190 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41445-0x10002f2ac5a0000 connected 2024-12-08T00:48:08,262 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:48:08,263 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:48:08,265 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:48:08,266 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956, hbase.cluster.distributed=false 2024-12-08T00:48:08,267 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T00:48:08,268 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41445 2024-12-08T00:48:08,268 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41445 2024-12-08T00:48:08,268 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41445 2024-12-08T00:48:08,269 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41445 2024-12-08T00:48:08,269 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41445 2024-12-08T00:48:08,281 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0f983e3e5be1:0 server-side Connection retries=45 2024-12-08T00:48:08,282 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:48:08,282 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T00:48:08,282 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T00:48:08,282 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:48:08,282 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T00:48:08,282 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T00:48:08,282 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T00:48:08,283 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33993 2024-12-08T00:48:08,284 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33993 connecting to ZooKeeper ensemble=127.0.0.1:53241 2024-12-08T00:48:08,284 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:48:08,286 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:48:08,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:339930x0, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T00:48:08,296 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:339930x0, quorum=127.0.0.1:53241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:48:08,296 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33993-0x10002f2ac5a0001 connected 2024-12-08T00:48:08,296 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T00:48:08,297 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T00:48:08,297 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33993-0x10002f2ac5a0001, quorum=127.0.0.1:53241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T00:48:08,298 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33993-0x10002f2ac5a0001, quorum=127.0.0.1:53241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T00:48:08,299 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33993 2024-12-08T00:48:08,299 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33993 2024-12-08T00:48:08,303 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33993 2024-12-08T00:48:08,303 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33993 2024-12-08T00:48:08,304 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33993 2024-12-08T00:48:08,314 DEBUG [M:0;0f983e3e5be1:41445 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0f983e3e5be1:41445 2024-12-08T00:48:08,314 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0f983e3e5be1,41445,1733618888143 2024-12-08T00:48:08,320 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:48:08,320 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33993-0x10002f2ac5a0001, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:48:08,321 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/0f983e3e5be1,41445,1733618888143 2024-12-08T00:48:08,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33993-0x10002f2ac5a0001, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T00:48:08,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:08,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33993-0x10002f2ac5a0001, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:08,329 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T00:48:08,330 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0f983e3e5be1,41445,1733618888143 from backup master directory 2024-12-08T00:48:08,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0f983e3e5be1,41445,1733618888143 2024-12-08T00:48:08,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33993-0x10002f2ac5a0001, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:48:08,337 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-08T00:48:08,337 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0f983e3e5be1,41445,1733618888143 2024-12-08T00:48:08,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:48:08,341 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/hbase.id] with ID: beb5a8b1-dd6f-4c4a-962a-d98ad458f964 2024-12-08T00:48:08,341 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/.tmp/hbase.id 2024-12-08T00:48:08,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45779 is added to blk_1073741826_1002 (size=42) 2024-12-08T00:48:08,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741826_1002 (size=42) 2024-12-08T00:48:08,349 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/.tmp/hbase.id]:[hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/hbase.id] 2024-12-08T00:48:08,362 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:48:08,362 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-08T00:48:08,363 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-08T00:48:08,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33993-0x10002f2ac5a0001, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:08,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:08,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741827_1003 (size=196) 2024-12-08T00:48:08,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45779 is added to blk_1073741827_1003 (size=196) 2024-12-08T00:48:08,378 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T00:48:08,379 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T00:48:08,379 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:48:08,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45779 is added to blk_1073741828_1004 (size=1189) 2024-12-08T00:48:08,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741828_1004 (size=1189) 2024-12-08T00:48:08,389 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/data/master/store 2024-12-08T00:48:08,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45779 is added to blk_1073741829_1005 (size=34) 2024-12-08T00:48:08,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741829_1005 (size=34) 2024-12-08T00:48:08,396 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:48:08,396 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T00:48:08,397 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:48:08,397 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:48:08,397 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T00:48:08,397 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:48:08,397 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T00:48:08,397 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733618888396Disabling compacts and flushes for region at 1733618888396Disabling writes for close at 1733618888397 (+1 ms)Writing region close event to WAL at 1733618888397Closed at 1733618888397 2024-12-08T00:48:08,398 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/data/master/store/.initializing 2024-12-08T00:48:08,398 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/WALs/0f983e3e5be1,41445,1733618888143 2024-12-08T00:48:08,401 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C41445%2C1733618888143, suffix=, logDir=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/WALs/0f983e3e5be1,41445,1733618888143, archiveDir=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/oldWALs, maxLogs=10 2024-12-08T00:48:08,402 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C41445%2C1733618888143.1733618888401 2024-12-08T00:48:08,407 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/WALs/0f983e3e5be1,41445,1733618888143/0f983e3e5be1%2C41445%2C1733618888143.1733618888401 2024-12-08T00:48:08,408 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34497:34497),(127.0.0.1/127.0.0.1:43141:43141)] 2024-12-08T00:48:08,408 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:48:08,408 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:48:08,408 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:08,409 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:08,410 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:08,411 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T00:48:08,411 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:08,412 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:48:08,412 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:08,413 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T00:48:08,413 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:08,413 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:48:08,413 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:08,415 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T00:48:08,415 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:08,415 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:48:08,415 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:08,417 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T00:48:08,417 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:08,417 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:48:08,417 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:08,418 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:08,419 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:08,421 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:08,421 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:08,421 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T00:48:08,423 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:08,425 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:48:08,426 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=870679, jitterRate=0.10712666809558868}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T00:48:08,427 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733618888409Initializing all the Stores at 1733618888410 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618888410Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618888410Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618888410Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618888410Cleaning up temporary data from old regions at 1733618888421 (+11 ms)Region opened successfully at 1733618888427 (+6 ms) 2024-12-08T00:48:08,428 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T00:48:08,432 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c27d4e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0f983e3e5be1/172.17.0.2:0 2024-12-08T00:48:08,433 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T00:48:08,433 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T00:48:08,433 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T00:48:08,433 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T00:48:08,434 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-08T00:48:08,434 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-08T00:48:08,434 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T00:48:08,436 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T00:48:08,437 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T00:48:08,462 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T00:48:08,462 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T00:48:08,463 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T00:48:08,470 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T00:48:08,470 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T00:48:08,472 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T00:48:08,478 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T00:48:08,480 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T00:48:08,487 DEBUG 
[master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T00:48:08,489 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T00:48:08,495 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T00:48:08,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33993-0x10002f2ac5a0001, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T00:48:08,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33993-0x10002f2ac5a0001, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:08,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T00:48:08,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:08,504 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0f983e3e5be1,41445,1733618888143, sessionid=0x10002f2ac5a0000, setting cluster-up flag (Was=false) 2024-12-08T00:48:08,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33993-0x10002f2ac5a0001, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:08,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:08,545 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T00:48:08,547 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0f983e3e5be1,41445,1733618888143 2024-12-08T00:48:08,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:08,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33993-0x10002f2ac5a0001, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:08,587 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T00:48:08,588 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0f983e3e5be1,41445,1733618888143 2024-12-08T00:48:08,589 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T00:48:08,591 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T00:48:08,592 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T00:48:08,592 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-08T00:48:08,592 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0f983e3e5be1,41445,1733618888143 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T00:48:08,593 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:48:08,593 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:48:08,594 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:48:08,594 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:48:08,594 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0f983e3e5be1:0, corePoolSize=10, maxPoolSize=10 2024-12-08T00:48:08,594 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:08,594 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=2, maxPoolSize=2 2024-12-08T00:48:08,594 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=1, 
maxPoolSize=1 2024-12-08T00:48:08,595 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733618918595 2024-12-08T00:48:08,595 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T00:48:08,595 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T00:48:08,595 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T00:48:08,595 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T00:48:08,595 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T00:48:08,595 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T00:48:08,595 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:08,596 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T00:48:08,596 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T00:48:08,596 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:48:08,596 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T00:48:08,596 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-08T00:48:08,596 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T00:48:08,597 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T00:48:08,598 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:08,598 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T00:48:08,603 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733618888597,5,FailOnTimeoutGroup] 2024-12-08T00:48:08,603 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733618888603,5,FailOnTimeoutGroup] 2024-12-08T00:48:08,603 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:08,603 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T00:48:08,603 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:08,603 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
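
Note: the records in this capture are concatenated onto very long physical lines, so a single line can hold a dozen log entries. For readers who want one record per line, a minimal JDK-only sketch that re-splits the text in front of each "2024-12-08T00:48:08,..."-style timestamp; the class name and the file-path argument are illustrative, not part of the test harness.

    import java.nio.file.Files;
    import java.nio.file.Path;

    // Splits concatenated HBase log text back into one record per line by breaking
    // in front of each ISO-8601 timestamp such as "2024-12-08T00:48:08,415 ".
    public class SplitLogRecords {
        public static void main(String[] args) throws Exception {
            String text = Files.readString(Path.of(args[0]));   // path to the captured log
            String[] records = text.split("(?=\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3} )");
            for (String r : records) {
                System.out.println(r.strip());
            }
        }
    }
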
2024-12-08T00:48:08,616 INFO [RS:0;0f983e3e5be1:33993 {}] regionserver.HRegionServer(746): ClusterId : beb5a8b1-dd6f-4c4a-962a-d98ad458f964 2024-12-08T00:48:08,616 DEBUG [RS:0;0f983e3e5be1:33993 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T00:48:08,621 DEBUG [RS:0;0f983e3e5be1:33993 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T00:48:08,621 DEBUG [RS:0;0f983e3e5be1:33993 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T00:48:08,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45779 is added to blk_1073741831_1007 (size=1321) 2024-12-08T00:48:08,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741831_1007 (size=1321) 2024-12-08T00:48:08,629 DEBUG [RS:0;0f983e3e5be1:33993 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T00:48:08,630 DEBUG [RS:0;0f983e3e5be1:33993 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@753efdb0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0f983e3e5be1/172.17.0.2:0 2024-12-08T00:48:08,645 DEBUG [RS:0;0f983e3e5be1:33993 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0f983e3e5be1:33993 2024-12-08T00:48:08,645 INFO [RS:0;0f983e3e5be1:33993 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T00:48:08,645 INFO [RS:0;0f983e3e5be1:33993 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T00:48:08,645 DEBUG [RS:0;0f983e3e5be1:33993 {}] regionserver.HRegionServer(832): About to register with Master. 
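
Note: the FlushLargeStoresPolicy record earlier in this block reports "(32.0 M)" for the master:store region (and, further down, "(16.0 M)" for hbase:meta). As the message itself says, when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset the lower bound falls back to the region's memstore flush heap size divided by its number of column families. A small arithmetic sketch of that fallback, using the 128 MB flushSize and the four families (info, proc, rs, state) visible in the log; the class below is illustrative, not HBase code.

    // Reproduces the "(32.0 M)" fallback reported by FlushLargeStoresPolicy for master:store.
    // The flush size 134217728 and the family count 4 are taken from the log; nothing else is.
    public class FlushLowerBoundSketch {
        public static void main(String[] args) {
            long memstoreFlushHeapSize = 134_217_728L;           // 128 MB, from flushSize in the log
            int families = 4;                                    // info, proc, rs, state
            long lowerBound = memstoreFlushHeapSize / families;  // 33554432
            System.out.printf("flushSizeLowerBound = %d (%.1f M)%n",
                lowerBound, lowerBound / (1024.0 * 1024.0));
            // prints 33554432 (32.0 M), matching FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
        }
    }
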
2024-12-08T00:48:08,646 INFO [RS:0;0f983e3e5be1:33993 {}] regionserver.HRegionServer(2659): reportForDuty to master=0f983e3e5be1,41445,1733618888143 with port=33993, startcode=1733618888281 2024-12-08T00:48:08,647 DEBUG [RS:0;0f983e3e5be1:33993 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T00:48:08,649 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35679, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T00:48:08,649 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41445 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0f983e3e5be1,33993,1733618888281 2024-12-08T00:48:08,649 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41445 {}] master.ServerManager(517): Registering regionserver=0f983e3e5be1,33993,1733618888281 2024-12-08T00:48:08,651 DEBUG [RS:0;0f983e3e5be1:33993 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956 2024-12-08T00:48:08,651 DEBUG [RS:0;0f983e3e5be1:33993 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34853 2024-12-08T00:48:08,651 DEBUG [RS:0;0f983e3e5be1:33993 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T00:48:08,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T00:48:08,662 DEBUG [RS:0;0f983e3e5be1:33993 {}] zookeeper.ZKUtil(111): regionserver:33993-0x10002f2ac5a0001, quorum=127.0.0.1:53241, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0f983e3e5be1,33993,1733618888281 2024-12-08T00:48:08,662 WARN [RS:0;0f983e3e5be1:33993 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T00:48:08,662 INFO [RS:0;0f983e3e5be1:33993 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:48:08,663 DEBUG [RS:0;0f983e3e5be1:33993 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281 2024-12-08T00:48:08,663 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0f983e3e5be1,33993,1733618888281] 2024-12-08T00:48:08,666 INFO [RS:0;0f983e3e5be1:33993 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T00:48:08,670 INFO [RS:0;0f983e3e5be1:33993 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T00:48:08,670 INFO [RS:0;0f983e3e5be1:33993 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T00:48:08,670 INFO [RS:0;0f983e3e5be1:33993 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
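
Note: the MemStoreFlusher record above reports globalMemStoreLimit=880 M and globalMemStoreLimitLowMark=836 M. The low mark is the global limit scaled by the lower-limit ratio; 0.95 is the usual default for hbase.regionserver.global.memstore.size.lower.limit and is assumed here rather than read from the log.

    // Relates the two MemStoreFlusher figures in the log record above.
    // The 0.95 ratio is an assumed default, not something this log states explicitly.
    public class MemstoreLowMarkSketch {
        public static void main(String[] args) {
            double globalLimitMb = 880.0;    // globalMemStoreLimit from the log
            double lowerLimitRatio = 0.95;   // assumed lower-limit ratio
            System.out.printf("low mark = %.0f M%n", globalLimitMb * lowerLimitRatio);
            // prints 836 M, matching globalMemStoreLimitLowMark=836 M
        }
    }
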
2024-12-08T00:48:08,670 INFO [RS:0;0f983e3e5be1:33993 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T00:48:08,671 INFO [RS:0;0f983e3e5be1:33993 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T00:48:08,671 INFO [RS:0;0f983e3e5be1:33993 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:08,671 DEBUG [RS:0;0f983e3e5be1:33993 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:08,672 DEBUG [RS:0;0f983e3e5be1:33993 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:08,672 DEBUG [RS:0;0f983e3e5be1:33993 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:08,672 DEBUG [RS:0;0f983e3e5be1:33993 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:08,672 DEBUG [RS:0;0f983e3e5be1:33993 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:08,672 DEBUG [RS:0;0f983e3e5be1:33993 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0f983e3e5be1:0, corePoolSize=2, maxPoolSize=2 2024-12-08T00:48:08,672 DEBUG [RS:0;0f983e3e5be1:33993 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:08,672 DEBUG [RS:0;0f983e3e5be1:33993 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:08,672 DEBUG [RS:0;0f983e3e5be1:33993 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:08,672 DEBUG [RS:0;0f983e3e5be1:33993 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:08,672 DEBUG [RS:0;0f983e3e5be1:33993 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:08,672 DEBUG [RS:0;0f983e3e5be1:33993 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:08,672 DEBUG [RS:0;0f983e3e5be1:33993 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:48:08,672 DEBUG [RS:0;0f983e3e5be1:33993 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:48:08,675 INFO [RS:0;0f983e3e5be1:33993 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
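
Note: both the master (the MASTER_* pools earlier in this block) and the region server (the RS_* pools above) log their thread pools in the same "Starting executor service name=..., corePoolSize=..., maxPoolSize=..." form. A throwaway JDK-only parser for pulling those three fields out of such records; the class name and the hard-coded sample line are illustrative.

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Extracts (name, corePoolSize, maxPoolSize) from ExecutorService startup records
    // like the MASTER_* and RS_* lines in this log.
    public class ExecutorPoolSizes {
        private static final Pattern POOL = Pattern.compile(
            "Starting executor service name=(\\S+), corePoolSize=(\\d+), maxPoolSize=(\\d+)");

        public static void main(String[] args) {
            String sample = "executor.ExecutorService(95): Starting executor service "
                + "name=RS_OPEN_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1";
            Matcher m = POOL.matcher(sample);
            if (m.find()) {
                System.out.println(m.group(1) + "  core=" + m.group(2) + "  max=" + m.group(3));
            }
        }
    }
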
2024-12-08T00:48:08,675 INFO [RS:0;0f983e3e5be1:33993 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:08,675 INFO [RS:0;0f983e3e5be1:33993 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:08,675 INFO [RS:0;0f983e3e5be1:33993 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:08,675 INFO [RS:0;0f983e3e5be1:33993 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:08,675 INFO [RS:0;0f983e3e5be1:33993 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,33993,1733618888281-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T00:48:08,711 INFO [RS:0;0f983e3e5be1:33993 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T00:48:08,711 INFO [RS:0;0f983e3e5be1:33993 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,33993,1733618888281-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:08,711 INFO [RS:0;0f983e3e5be1:33993 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:08,711 INFO [RS:0;0f983e3e5be1:33993 {}] regionserver.Replication(171): 0f983e3e5be1,33993,1733618888281 started 2024-12-08T00:48:08,723 INFO [RS:0;0f983e3e5be1:33993 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:08,723 INFO [RS:0;0f983e3e5be1:33993 {}] regionserver.HRegionServer(1482): Serving as 0f983e3e5be1,33993,1733618888281, RpcServer on 0f983e3e5be1/172.17.0.2:33993, sessionid=0x10002f2ac5a0001 2024-12-08T00:48:08,723 DEBUG [RS:0;0f983e3e5be1:33993 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T00:48:08,723 DEBUG [RS:0;0f983e3e5be1:33993 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0f983e3e5be1,33993,1733618888281 2024-12-08T00:48:08,723 DEBUG [RS:0;0f983e3e5be1:33993 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,33993,1733618888281' 2024-12-08T00:48:08,723 DEBUG [RS:0;0f983e3e5be1:33993 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T00:48:08,724 DEBUG [RS:0;0f983e3e5be1:33993 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T00:48:08,725 DEBUG [RS:0;0f983e3e5be1:33993 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T00:48:08,725 DEBUG [RS:0;0f983e3e5be1:33993 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T00:48:08,725 DEBUG [RS:0;0f983e3e5be1:33993 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0f983e3e5be1,33993,1733618888281 2024-12-08T00:48:08,725 DEBUG [RS:0;0f983e3e5be1:33993 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,33993,1733618888281' 2024-12-08T00:48:08,725 DEBUG [RS:0;0f983e3e5be1:33993 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T00:48:08,725 DEBUG 
[RS:0;0f983e3e5be1:33993 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T00:48:08,726 DEBUG [RS:0;0f983e3e5be1:33993 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T00:48:08,726 INFO [RS:0;0f983e3e5be1:33993 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T00:48:08,726 INFO [RS:0;0f983e3e5be1:33993 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T00:48:08,828 INFO [RS:0;0f983e3e5be1:33993 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C33993%2C1733618888281, suffix=, logDir=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281, archiveDir=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/oldWALs, maxLogs=32 2024-12-08T00:48:08,829 INFO [RS:0;0f983e3e5be1:33993 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C33993%2C1733618888281.1733618888829 2024-12-08T00:48:08,835 INFO [RS:0;0f983e3e5be1:33993 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618888829 2024-12-08T00:48:08,839 DEBUG [RS:0;0f983e3e5be1:33993 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34497:34497),(127.0.0.1/127.0.0.1:43141:43141)] 2024-12-08T00:48:09,024 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T00:48:09,025 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, 
regionDir=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956 2024-12-08T00:48:09,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741833_1009 (size=32) 2024-12-08T00:48:09,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45779 is added to blk_1073741833_1009 (size=32) 2024-12-08T00:48:09,037 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:48:09,039 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T00:48:09,041 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T00:48:09,041 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:09,042 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:48:09,042 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T00:48:09,044 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T00:48:09,044 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:09,045 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:48:09,045 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T00:48:09,048 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T00:48:09,048 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:09,049 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:48:09,049 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T00:48:09,051 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T00:48:09,051 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:09,052 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:48:09,052 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T00:48:09,053 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits 
file(s) under hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/hbase/meta/1588230740 2024-12-08T00:48:09,053 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/hbase/meta/1588230740 2024-12-08T00:48:09,055 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T00:48:09,055 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T00:48:09,056 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T00:48:09,058 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T00:48:09,061 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:48:09,061 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=750476, jitterRate=-0.04572071135044098}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T00:48:09,062 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733618889037Initializing all the Stores at 1733618889038 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618889038Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618889039 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618889039Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618889039Cleaning up temporary data from old regions at 1733618889055 (+16 ms)Region opened successfully at 1733618889062 (+7 ms) 2024-12-08T00:48:09,062 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T00:48:09,062 INFO [PEWorker-1 {}] 
regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T00:48:09,062 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T00:48:09,062 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T00:48:09,062 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T00:48:09,063 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T00:48:09,063 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733618889062Disabling compacts and flushes for region at 1733618889062Disabling writes for close at 1733618889062Writing region close event to WAL at 1733618889063 (+1 ms)Closed at 1733618889063 2024-12-08T00:48:09,064 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:48:09,064 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T00:48:09,064 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T00:48:09,066 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T00:48:09,067 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T00:48:09,217 DEBUG [0f983e3e5be1:41445 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-08T00:48:09,218 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0f983e3e5be1,33993,1733618888281 2024-12-08T00:48:09,221 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0f983e3e5be1,33993,1733618888281, state=OPENING 2024-12-08T00:48:09,270 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T00:48:09,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33993-0x10002f2ac5a0001, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:09,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:09,279 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T00:48:09,279 DEBUG 
[zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:48:09,279 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:48:09,279 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0f983e3e5be1,33993,1733618888281}] 2024-12-08T00:48:09,434 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T00:48:09,440 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36115, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T00:48:09,447 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-08T00:48:09,447 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:48:09,449 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C33993%2C1733618888281.meta, suffix=.meta, logDir=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281, archiveDir=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/oldWALs, maxLogs=32 2024-12-08T00:48:09,450 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta 2024-12-08T00:48:09,455 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta 2024-12-08T00:48:09,463 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43141:43141),(127.0.0.1/127.0.0.1:34497:34497)] 2024-12-08T00:48:09,471 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:48:09,471 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T00:48:09,471 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T00:48:09,472 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta 
successfully. 2024-12-08T00:48:09,472 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T00:48:09,472 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:48:09,472 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-08T00:48:09,472 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-08T00:48:09,473 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T00:48:09,474 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T00:48:09,474 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:09,475 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:48:09,475 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T00:48:09,476 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T00:48:09,476 DEBUG [StoreOpener-1588230740-1 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:09,476 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:48:09,476 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T00:48:09,477 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T00:48:09,477 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:09,477 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:48:09,477 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T00:48:09,478 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T00:48:09,478 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:09,479 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
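
Note: the CompactionConfiguration record is emitted once per column family, which is why the same parameter dump repeats throughout this block. Two of its numbers read more easily after unit conversion: major period 604800000 ms and throttle point 2684354560 bytes. The sketch below also shows a +/- period*jitter reading of "major jitter 0.500000"; how the compaction policy actually applies that jitter is an assumption here, not something the log states.

    // Unit conversions for the repeated CompactionConfiguration records, plus an assumed
    // +/- period*jitter window for the 0.5 major jitter. All inputs are copied from the log.
    public class CompactionConfigSketch {
        public static void main(String[] args) {
            long majorPeriodMs = 604_800_000L;        // "major period" from the log
            double jitter = 0.5;                      // "major jitter" from the log
            long throttlePointBytes = 2_684_354_560L; // "throttle point" from the log

            double days = majorPeriodMs / 86_400_000.0;
            System.out.printf("major period   = %.1f days%n", days);                      // 7.0 days
            System.out.printf("jitter window ~ %.1f to %.1f days (assumed +/-)%n",
                days * (1 - jitter), days * (1 + jitter));                                // 3.5 to 10.5
            System.out.printf("throttle point = %.1f GB%n",
                throttlePointBytes / (1024.0 * 1024.0 * 1024.0));                         // 2.5 GB
        }
    }
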
2024-12-08T00:48:09,479 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T00:48:09,480 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/hbase/meta/1588230740 2024-12-08T00:48:09,481 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/hbase/meta/1588230740 2024-12-08T00:48:09,482 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T00:48:09,482 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T00:48:09,483 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T00:48:09,484 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T00:48:09,485 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=748971, jitterRate=-0.04763457179069519}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T00:48:09,485 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-08T00:48:09,486 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733618889472Writing region info on filesystem at 1733618889472Initializing all the Stores at 1733618889473 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618889473Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618889473Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618889473Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618889473Cleaning up temporary data from old regions at 1733618889482 (+9 ms)Running coprocessor post-open hooks at 1733618889485 (+3 ms)Region opened successfully at 1733618889486 (+1 ms) 2024-12-08T00:48:09,487 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733618889434 2024-12-08T00:48:09,489 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T00:48:09,490 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-08T00:48:09,490 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0f983e3e5be1,33993,1733618888281 2024-12-08T00:48:09,491 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0f983e3e5be1,33993,1733618888281, state=OPEN 2024-12-08T00:48:09,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T00:48:09,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33993-0x10002f2ac5a0001, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T00:48:09,535 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0f983e3e5be1,33993,1733618888281 2024-12-08T00:48:09,535 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:48:09,535 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:48:09,539 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T00:48:09,539 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0f983e3e5be1,33993,1733618888281 in 256 msec 2024-12-08T00:48:09,544 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T00:48:09,544 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 475 msec 2024-12-08T00:48:09,545 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:48:09,545 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-08T00:48:09,547 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T00:48:09,548 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0f983e3e5be1,33993,1733618888281, seqNum=-1] 2024-12-08T00:48:09,548 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:48:09,549 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57691, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:48:09,557 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 965 msec 2024-12-08T00:48:09,558 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733618889557, completionTime=-1 2024-12-08T00:48:09,558 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-08T00:48:09,558 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-08T00:48:09,560 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-08T00:48:09,560 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733618949560 2024-12-08T00:48:09,560 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733619009560 2024-12-08T00:48:09,560 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-08T00:48:09,560 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,41445,1733618888143-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:09,560 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,41445,1733618888143-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:09,560 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,41445,1733618888143-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:09,560 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0f983e3e5be1:41445, period=300000, unit=MILLISECONDS is enabled. 
2024-12-08T00:48:09,561 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:09,561 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:09,562 DEBUG [master/0f983e3e5be1:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T00:48:09,565 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.227sec 2024-12-08T00:48:09,565 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T00:48:09,565 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T00:48:09,565 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T00:48:09,565 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-08T00:48:09,565 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T00:48:09,565 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,41445,1733618888143-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T00:48:09,565 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,41445,1733618888143-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T00:48:09,567 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T00:48:09,567 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T00:48:09,567 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,41445,1733618888143-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-08T00:48:09,616 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ad2d2c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:48:09,617 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0f983e3e5be1,41445,-1 for getting cluster id 2024-12-08T00:48:09,617 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T00:48:09,619 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'beb5a8b1-dd6f-4c4a-962a-d98ad458f964' 2024-12-08T00:48:09,619 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T00:48:09,619 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "beb5a8b1-dd6f-4c4a-962a-d98ad458f964" 2024-12-08T00:48:09,620 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4429846e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:48:09,620 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0f983e3e5be1,41445,-1] 2024-12-08T00:48:09,620 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T00:48:09,621 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:48:09,623 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38398, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T00:48:09,624 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6fb3c2cd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:48:09,625 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T00:48:09,626 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0f983e3e5be1,33993,1733618888281, seqNum=-1] 2024-12-08T00:48:09,627 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:48:09,629 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35090, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:48:09,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0f983e3e5be1,41445,1733618888143 2024-12-08T00:48:09,631 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:48:09,634 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-08T00:48:09,648 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0f983e3e5be1:0 server-side Connection retries=45 2024-12-08T00:48:09,648 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:48:09,648 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T00:48:09,648 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T00:48:09,648 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:48:09,648 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T00:48:09,648 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T00:48:09,648 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T00:48:09,649 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46325 2024-12-08T00:48:09,650 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46325 connecting to ZooKeeper ensemble=127.0.0.1:53241 2024-12-08T00:48:09,650 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:48:09,652 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:48:09,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:463250x0, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T00:48:09,687 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46325-0x10002f2ac5a0002 connected 2024-12-08T00:48:09,687 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:46325-0x10002f2ac5a0002, quorum=127.0.0.1:53241, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-12-08T00:48:09,687 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-12-08T00:48:09,688 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T00:48:09,688 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
2024-12-08T00:48:09,689 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:46325-0x10002f2ac5a0002, quorum=127.0.0.1:53241, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T00:48:09,690 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46325-0x10002f2ac5a0002, quorum=127.0.0.1:53241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T00:48:09,691 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46325 2024-12-08T00:48:09,695 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46325 2024-12-08T00:48:09,695 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46325 2024-12-08T00:48:09,696 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46325 2024-12-08T00:48:09,697 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46325 2024-12-08T00:48:09,699 INFO [RS:1;0f983e3e5be1:46325 {}] regionserver.HRegionServer(746): ClusterId : beb5a8b1-dd6f-4c4a-962a-d98ad458f964 2024-12-08T00:48:09,699 DEBUG [RS:1;0f983e3e5be1:46325 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T00:48:09,703 DEBUG [RS:1;0f983e3e5be1:46325 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T00:48:09,703 DEBUG [RS:1;0f983e3e5be1:46325 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T00:48:09,712 DEBUG [RS:1;0f983e3e5be1:46325 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T00:48:09,713 DEBUG [RS:1;0f983e3e5be1:46325 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@146d41d5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0f983e3e5be1/172.17.0.2:0 2024-12-08T00:48:09,725 DEBUG [RS:1;0f983e3e5be1:46325 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;0f983e3e5be1:46325 2024-12-08T00:48:09,725 INFO [RS:1;0f983e3e5be1:46325 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T00:48:09,725 INFO [RS:1;0f983e3e5be1:46325 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T00:48:09,725 DEBUG [RS:1;0f983e3e5be1:46325 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-08T00:48:09,726 INFO [RS:1;0f983e3e5be1:46325 {}] regionserver.HRegionServer(2659): reportForDuty to master=0f983e3e5be1,41445,1733618888143 with port=46325, startcode=1733618889648 2024-12-08T00:48:09,726 DEBUG [RS:1;0f983e3e5be1:46325 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T00:48:09,727 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35943, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T00:48:09,728 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41445 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0f983e3e5be1,46325,1733618889648 2024-12-08T00:48:09,728 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41445 {}] master.ServerManager(517): Registering regionserver=0f983e3e5be1,46325,1733618889648 2024-12-08T00:48:09,729 DEBUG [RS:1;0f983e3e5be1:46325 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956 2024-12-08T00:48:09,729 DEBUG [RS:1;0f983e3e5be1:46325 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34853 2024-12-08T00:48:09,729 DEBUG [RS:1;0f983e3e5be1:46325 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T00:48:09,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T00:48:09,737 DEBUG [RS:1;0f983e3e5be1:46325 {}] zookeeper.ZKUtil(111): regionserver:46325-0x10002f2ac5a0002, quorum=127.0.0.1:53241, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0f983e3e5be1,46325,1733618889648 2024-12-08T00:48:09,737 WARN [RS:1;0f983e3e5be1:46325 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T00:48:09,737 INFO [RS:1;0f983e3e5be1:46325 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:48:09,737 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0f983e3e5be1,46325,1733618889648] 2024-12-08T00:48:09,737 DEBUG [RS:1;0f983e3e5be1:46325 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648 2024-12-08T00:48:09,741 INFO [RS:1;0f983e3e5be1:46325 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T00:48:09,743 INFO [RS:1;0f983e3e5be1:46325 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T00:48:09,744 INFO [RS:1;0f983e3e5be1:46325 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T00:48:09,744 INFO [RS:1;0f983e3e5be1:46325 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-08T00:48:09,744 INFO [RS:1;0f983e3e5be1:46325 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T00:48:09,745 INFO [RS:1;0f983e3e5be1:46325 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T00:48:09,745 INFO [RS:1;0f983e3e5be1:46325 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:09,746 DEBUG [RS:1;0f983e3e5be1:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:09,746 DEBUG [RS:1;0f983e3e5be1:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:09,746 DEBUG [RS:1;0f983e3e5be1:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:09,746 DEBUG [RS:1;0f983e3e5be1:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:09,746 DEBUG [RS:1;0f983e3e5be1:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:09,746 DEBUG [RS:1;0f983e3e5be1:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0f983e3e5be1:0, corePoolSize=2, maxPoolSize=2 2024-12-08T00:48:09,746 DEBUG [RS:1;0f983e3e5be1:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:09,746 DEBUG [RS:1;0f983e3e5be1:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:09,746 DEBUG [RS:1;0f983e3e5be1:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:09,746 DEBUG [RS:1;0f983e3e5be1:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:09,746 DEBUG [RS:1;0f983e3e5be1:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:09,746 DEBUG [RS:1;0f983e3e5be1:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:09,746 DEBUG [RS:1;0f983e3e5be1:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:48:09,747 DEBUG [RS:1;0f983e3e5be1:46325 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:48:09,748 INFO [RS:1;0f983e3e5be1:46325 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-08T00:48:09,748 INFO [RS:1;0f983e3e5be1:46325 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:09,748 INFO [RS:1;0f983e3e5be1:46325 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:09,748 INFO [RS:1;0f983e3e5be1:46325 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:09,748 INFO [RS:1;0f983e3e5be1:46325 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:09,748 INFO [RS:1;0f983e3e5be1:46325 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,46325,1733618889648-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T00:48:09,760 INFO [RS:1;0f983e3e5be1:46325 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T00:48:09,761 INFO [RS:1;0f983e3e5be1:46325 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,46325,1733618889648-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:09,761 INFO [RS:1;0f983e3e5be1:46325 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:09,761 INFO [RS:1;0f983e3e5be1:46325 {}] regionserver.Replication(171): 0f983e3e5be1,46325,1733618889648 started 2024-12-08T00:48:09,772 INFO [RS:1;0f983e3e5be1:46325 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:09,772 INFO [RS:1;0f983e3e5be1:46325 {}] regionserver.HRegionServer(1482): Serving as 0f983e3e5be1,46325,1733618889648, RpcServer on 0f983e3e5be1/172.17.0.2:46325, sessionid=0x10002f2ac5a0002 2024-12-08T00:48:09,772 DEBUG [RS:1;0f983e3e5be1:46325 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T00:48:09,772 DEBUG [RS:1;0f983e3e5be1:46325 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0f983e3e5be1,46325,1733618889648 2024-12-08T00:48:09,772 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;0f983e3e5be1:46325,5,FailOnTimeoutGroup] 2024-12-08T00:48:09,772 DEBUG [RS:1;0f983e3e5be1:46325 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,46325,1733618889648' 2024-12-08T00:48:09,772 DEBUG [RS:1;0f983e3e5be1:46325 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T00:48:09,772 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-12-08T00:48:09,773 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-08T00:48:09,773 DEBUG [RS:1;0f983e3e5be1:46325 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T00:48:09,773 DEBUG [RS:1;0f983e3e5be1:46325 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T00:48:09,773 DEBUG [RS:1;0f983e3e5be1:46325 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T00:48:09,773 DEBUG [RS:1;0f983e3e5be1:46325 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
0f983e3e5be1,46325,1733618889648 2024-12-08T00:48:09,773 DEBUG [RS:1;0f983e3e5be1:46325 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,46325,1733618889648' 2024-12-08T00:48:09,773 DEBUG [RS:1;0f983e3e5be1:46325 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T00:48:09,774 DEBUG [RS:1;0f983e3e5be1:46325 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T00:48:09,774 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 0f983e3e5be1,41445,1733618888143 2024-12-08T00:48:09,774 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2a7612cc 2024-12-08T00:48:09,774 DEBUG [RS:1;0f983e3e5be1:46325 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T00:48:09,774 INFO [RS:1;0f983e3e5be1:46325 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T00:48:09,774 INFO [RS:1;0f983e3e5be1:46325 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T00:48:09,774 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T00:48:09,776 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38406, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T00:48:09,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41445 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-08T00:48:09,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41445 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-08T00:48:09,777 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41445 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T00:48:09,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41445 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-08T00:48:09,779 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T00:48:09,779 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:09,780 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41445 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-12-08T00:48:09,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41445 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T00:48:09,781 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T00:48:09,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45779 is added to blk_1073741835_1011 (size=393) 2024-12-08T00:48:09,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741835_1011 (size=393) 2024-12-08T00:48:09,790 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8eb80a1ef3b100de075d28e6732c96d0, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956 2024-12-08T00:48:09,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741836_1012 (size=76) 2024-12-08T00:48:09,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45779 is added to blk_1073741836_1012 (size=76) 2024-12-08T00:48:09,797 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:48:09,797 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 8eb80a1ef3b100de075d28e6732c96d0, disabling compactions & flushes 2024-12-08T00:48:09,797 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0. 2024-12-08T00:48:09,797 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0. 2024-12-08T00:48:09,797 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0. after waiting 0 ms 2024-12-08T00:48:09,797 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0. 2024-12-08T00:48:09,797 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0. 2024-12-08T00:48:09,797 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8eb80a1ef3b100de075d28e6732c96d0: Waiting for close lock at 1733618889797Disabling compacts and flushes for region at 1733618889797Disabling writes for close at 1733618889797Writing region close event to WAL at 1733618889797Closed at 1733618889797 2024-12-08T00:48:09,799 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T00:48:09,799 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733618889799"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733618889799"}]},"ts":"1733618889799"} 2024-12-08T00:48:09,801 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-08T00:48:09,803 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T00:48:09,803 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733618889803"}]},"ts":"1733618889803"} 2024-12-08T00:48:09,805 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-08T00:48:09,806 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8eb80a1ef3b100de075d28e6732c96d0, ASSIGN}] 2024-12-08T00:48:09,807 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8eb80a1ef3b100de075d28e6732c96d0, ASSIGN 2024-12-08T00:48:09,808 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8eb80a1ef3b100de075d28e6732c96d0, ASSIGN; state=OFFLINE, location=0f983e3e5be1,33993,1733618888281; forceNewPlan=false, retain=false 2024-12-08T00:48:09,878 INFO [RS:1;0f983e3e5be1:46325 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C46325%2C1733618889648, suffix=, logDir=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648, archiveDir=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/oldWALs, maxLogs=32 2024-12-08T00:48:09,880 INFO [RS:1;0f983e3e5be1:46325 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C46325%2C1733618889648.1733618889880 2024-12-08T00:48:09,889 INFO [RS:1;0f983e3e5be1:46325 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 2024-12-08T00:48:09,895 DEBUG [RS:1;0f983e3e5be1:46325 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34497:34497),(127.0.0.1/127.0.0.1:43141:43141)] 2024-12-08T00:48:09,959 INFO [0f983e3e5be1:41445 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-08T00:48:09,959 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8eb80a1ef3b100de075d28e6732c96d0, regionState=OPENING, regionLocation=0f983e3e5be1,33993,1733618888281 2024-12-08T00:48:09,963 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8eb80a1ef3b100de075d28e6732c96d0, ASSIGN because future has completed 2024-12-08T00:48:09,963 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8eb80a1ef3b100de075d28e6732c96d0, server=0f983e3e5be1,33993,1733618888281}] 2024-12-08T00:48:10,128 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0. 2024-12-08T00:48:10,128 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 8eb80a1ef3b100de075d28e6732c96d0, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0.', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:48:10,129 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 8eb80a1ef3b100de075d28e6732c96d0 2024-12-08T00:48:10,129 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:48:10,129 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 8eb80a1ef3b100de075d28e6732c96d0 2024-12-08T00:48:10,129 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 8eb80a1ef3b100de075d28e6732c96d0 2024-12-08T00:48:10,131 INFO [StoreOpener-8eb80a1ef3b100de075d28e6732c96d0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 8eb80a1ef3b100de075d28e6732c96d0 2024-12-08T00:48:10,134 INFO [StoreOpener-8eb80a1ef3b100de075d28e6732c96d0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8eb80a1ef3b100de075d28e6732c96d0 columnFamilyName info 2024-12-08T00:48:10,134 DEBUG [StoreOpener-8eb80a1ef3b100de075d28e6732c96d0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:10,134 INFO [StoreOpener-8eb80a1ef3b100de075d28e6732c96d0-1 {}] regionserver.HStore(327): Store=8eb80a1ef3b100de075d28e6732c96d0/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:48:10,135 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 8eb80a1ef3b100de075d28e6732c96d0 2024-12-08T00:48:10,136 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0 2024-12-08T00:48:10,136 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0 2024-12-08T00:48:10,137 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 8eb80a1ef3b100de075d28e6732c96d0 2024-12-08T00:48:10,137 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 8eb80a1ef3b100de075d28e6732c96d0 2024-12-08T00:48:10,139 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 8eb80a1ef3b100de075d28e6732c96d0 2024-12-08T00:48:10,142 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:48:10,142 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 8eb80a1ef3b100de075d28e6732c96d0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=788067, jitterRate=0.002079695463180542}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T00:48:10,142 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8eb80a1ef3b100de075d28e6732c96d0 2024-12-08T00:48:10,143 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 8eb80a1ef3b100de075d28e6732c96d0: Running coprocessor pre-open hook at 1733618890130Writing region info on filesystem at 1733618890130Initializing all the Stores at 1733618890131 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618890131Cleaning up temporary data from old regions at 1733618890137 (+6 ms)Running coprocessor post-open hooks at 1733618890142 (+5 ms)Region opened successfully at 1733618890143 (+1 ms) 2024-12-08T00:48:10,144 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0., pid=6, masterSystemTime=1733618890117 2024-12-08T00:48:10,147 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0. 2024-12-08T00:48:10,147 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0. 2024-12-08T00:48:10,148 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8eb80a1ef3b100de075d28e6732c96d0, regionState=OPEN, openSeqNum=2, regionLocation=0f983e3e5be1,33993,1733618888281 2024-12-08T00:48:10,151 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8eb80a1ef3b100de075d28e6732c96d0, server=0f983e3e5be1,33993,1733618888281 because future has completed 2024-12-08T00:48:10,155 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-08T00:48:10,156 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 8eb80a1ef3b100de075d28e6732c96d0, server=0f983e3e5be1,33993,1733618888281 in 190 msec 2024-12-08T00:48:10,158 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-08T00:48:10,158 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8eb80a1ef3b100de075d28e6732c96d0, ASSIGN in 349 msec 2024-12-08T00:48:10,159 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T00:48:10,159 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733618890159"}]},"ts":"1733618890159"} 2024-12-08T00:48:10,161 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-08T00:48:10,163 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T00:48:10,165 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 386 msec 2024-12-08T00:48:14,976 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T00:48:14,980 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:48:14,998 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:48:14,999 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:48:14,999 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:48:15,006 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-12-08T00:48:15,912 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-08T00:48:15,912 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-08T00:48:15,914 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-08T00:48:15,914 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-12-08T00:48:15,915 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T00:48:15,915 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-08T00:48:19,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41445 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T00:48:19,817 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-12-08T00:48:19,817 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-12-08T00:48:19,824 DEBUG [Time-limited test {}] 
hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-12-08T00:48:19,824 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0. 2024-12-08T00:48:19,838 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:48:19,841 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:48:19,842 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:48:19,842 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:48:19,842 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T00:48:19,842 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5aa86c35{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:48:19,843 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2452cc37{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:48:19,931 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1fa52141{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/java.io.tmpdir/jetty-localhost-41007-hadoop-hdfs-3_4_1-tests_jar-_-any-14307714161544014509/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:48:19,931 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7c6ba029{HTTP/1.1, (http/1.1)}{localhost:41007} 2024-12-08T00:48:19,931 INFO [Time-limited test {}] server.Server(415): Started @119361ms 2024-12-08T00:48:19,932 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T00:48:19,963 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:48:19,966 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:48:19,966 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:48:19,967 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:48:19,967 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T00:48:19,968 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54916ccd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:48:19,968 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74c0fccc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:48:20,057 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@33f41830{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/java.io.tmpdir/jetty-localhost-35079-hadoop-hdfs-3_4_1-tests_jar-_-any-10505385367890867990/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:48:20,057 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c16d06a{HTTP/1.1, (http/1.1)}{localhost:35079} 2024-12-08T00:48:20,057 INFO [Time-limited test {}] server.Server(415): Started @119487ms 2024-12-08T00:48:20,058 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T00:48:20,084 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:48:20,087 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:48:20,087 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:48:20,087 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:48:20,087 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T00:48:20,088 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@66f331b6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:48:20,088 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3aae8f75{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:48:20,181 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b3c44a7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/java.io.tmpdir/jetty-localhost-33429-hadoop-hdfs-3_4_1-tests_jar-_-any-16366843341821797726/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:48:20,182 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6682cb77{HTTP/1.1, (http/1.1)}{localhost:33429} 2024-12-08T00:48:20,182 INFO [Time-limited test {}] server.Server(415): Started @119612ms 2024-12-08T00:48:20,183 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T00:48:20,822 WARN [Thread-867 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data5/current/BP-791727382-172.17.0.2-1733618886417/current, will proceed with Du for space computation calculation, 2024-12-08T00:48:20,822 WARN [Thread-868 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data6/current/BP-791727382-172.17.0.2-1733618886417/current, will proceed with Du for space computation calculation, 2024-12-08T00:48:20,841 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T00:48:20,843 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x207a76d249099724 with lease ID 0x3478e23613d90b48: Processing first storage report for DS-c01c0122-359b-4f15-840f-3c1bed6f09a4 from datanode DatanodeRegistration(127.0.0.1:36435, datanodeUuid=e89283f3-cf6c-4c07-84c2-710371276ec2, infoPort=45933, infoSecurePort=0, ipcPort=33373, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417) 2024-12-08T00:48:20,843 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x207a76d249099724 with lease ID 0x3478e23613d90b48: from storage DS-c01c0122-359b-4f15-840f-3c1bed6f09a4 node DatanodeRegistration(127.0.0.1:36435, datanodeUuid=e89283f3-cf6c-4c07-84c2-710371276ec2, infoPort=45933, infoSecurePort=0, ipcPort=33373, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:48:20,843 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x207a76d249099724 with lease ID 0x3478e23613d90b48: Processing first storage report for DS-ced4d1bd-7dc8-44ef-8115-e0a17dd4b9b8 from datanode DatanodeRegistration(127.0.0.1:36435, datanodeUuid=e89283f3-cf6c-4c07-84c2-710371276ec2, infoPort=45933, infoSecurePort=0, ipcPort=33373, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417) 2024-12-08T00:48:20,843 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x207a76d249099724 with lease ID 0x3478e23613d90b48: from storage DS-ced4d1bd-7dc8-44ef-8115-e0a17dd4b9b8 node DatanodeRegistration(127.0.0.1:36435, datanodeUuid=e89283f3-cf6c-4c07-84c2-710371276ec2, infoPort=45933, infoSecurePort=0, ipcPort=33373, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:48:20,969 WARN [Thread-880 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data8/current/BP-791727382-172.17.0.2-1733618886417/current, will proceed with Du for space computation calculation, 2024-12-08T00:48:20,969 WARN [Thread-879 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data7/current/BP-791727382-172.17.0.2-1733618886417/current, will proceed with Du for space computation calculation, 2024-12-08T00:48:20,983 WARN [Thread-831 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T00:48:20,985 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4a4f60bbdb4c6de4 with lease ID 0x3478e23613d90b49: Processing first storage report for DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9 from datanode DatanodeRegistration(127.0.0.1:33451, datanodeUuid=a687aeef-559d-4266-ae5f-7c18617a39af, infoPort=46073, infoSecurePort=0, ipcPort=43427, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417) 2024-12-08T00:48:20,985 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4a4f60bbdb4c6de4 with lease ID 0x3478e23613d90b49: from storage DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9 node DatanodeRegistration(127.0.0.1:33451, datanodeUuid=a687aeef-559d-4266-ae5f-7c18617a39af, infoPort=46073, infoSecurePort=0, ipcPort=43427, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:48:20,985 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4a4f60bbdb4c6de4 with lease ID 0x3478e23613d90b49: Processing first storage report for DS-9747a7b9-6878-4462-aab8-54277a9bd1e9 from datanode DatanodeRegistration(127.0.0.1:33451, datanodeUuid=a687aeef-559d-4266-ae5f-7c18617a39af, infoPort=46073, infoSecurePort=0, ipcPort=43427, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417) 2024-12-08T00:48:20,985 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4a4f60bbdb4c6de4 with lease ID 0x3478e23613d90b49: from storage DS-9747a7b9-6878-4462-aab8-54277a9bd1e9 node DatanodeRegistration(127.0.0.1:33451, datanodeUuid=a687aeef-559d-4266-ae5f-7c18617a39af, infoPort=46073, infoSecurePort=0, ipcPort=43427, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:48:21,081 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data9/current/BP-791727382-172.17.0.2-1733618886417/current, will proceed with Du for space computation calculation, 2024-12-08T00:48:21,081 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data10/current/BP-791727382-172.17.0.2-1733618886417/current, will proceed with Du for space computation calculation, 2024-12-08T00:48:21,100 WARN [Thread-853 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T00:48:21,102 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb4da095e001020e7 with lease ID 0x3478e23613d90b4a: Processing first storage report for DS-958b2e8f-e0e1-44dc-a03b-46f8f4aa8441 from datanode DatanodeRegistration(127.0.0.1:36109, datanodeUuid=cab2c1f4-0b38-495b-81c5-dd7bc55561eb, infoPort=36397, infoSecurePort=0, ipcPort=41757, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417) 2024-12-08T00:48:21,102 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb4da095e001020e7 with lease ID 0x3478e23613d90b4a: from storage DS-958b2e8f-e0e1-44dc-a03b-46f8f4aa8441 node DatanodeRegistration(127.0.0.1:36109, datanodeUuid=cab2c1f4-0b38-495b-81c5-dd7bc55561eb, infoPort=36397, infoSecurePort=0, ipcPort=41757, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:48:21,102 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb4da095e001020e7 with lease ID 0x3478e23613d90b4a: Processing first storage report for DS-01f62f7e-17ac-4fdc-ab0a-eb4b515362a5 from datanode DatanodeRegistration(127.0.0.1:36109, datanodeUuid=cab2c1f4-0b38-495b-81c5-dd7bc55561eb, infoPort=36397, infoSecurePort=0, ipcPort=41757, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417) 2024-12-08T00:48:21,102 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb4da095e001020e7 with lease ID 0x3478e23613d90b4a: from storage DS-01f62f7e-17ac-4fdc-ab0a-eb4b515362a5 node DatanodeRegistration(127.0.0.1:36109, datanodeUuid=cab2c1f4-0b38-495b-81c5-dd7bc55561eb, infoPort=36397, infoSecurePort=0, ipcPort=41757, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:48:21,108 WARN [ResponseProcessor for block BP-791727382-172.17.0.2-1733618886417:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-791727382-172.17.0.2-1733618886417:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:21,109 WARN [DataStreamer for file /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta block BP-791727382-172.17.0.2-1733618886417:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK], DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]) is bad. 
2024-12-08T00:48:21,109 WARN [ResponseProcessor for block BP-791727382-172.17.0.2-1733618886417:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-791727382-172.17.0.2-1733618886417:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-791727382-172.17.0.2-1733618886417:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:21,109 WARN [ResponseProcessor for block BP-791727382-172.17.0.2-1733618886417:blk_1073741832_1008 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-791727382-172.17.0.2-1733618886417:blk_1073741832_1008 java.io.IOException: Bad response ERROR for BP-791727382-172.17.0.2-1733618886417:blk_1073741832_1008 from datanode DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:21,109 WARN [DataStreamer for file /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/WALs/0f983e3e5be1,41445,1733618888143/0f983e3e5be1%2C41445%2C1733618888143.1733618888401 block BP-791727382-172.17.0.2-1733618886417:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK], DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]) is bad. 2024-12-08T00:48:21,109 WARN [ResponseProcessor for block BP-791727382-172.17.0.2-1733618886417:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-791727382-172.17.0.2-1733618886417:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-791727382-172.17.0.2-1733618886417:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:21,109 WARN [DataStreamer for file /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618888829 block BP-791727382-172.17.0.2-1733618886417:blk_1073741832_1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741832_1008 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK], DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]) is bad. 
2024-12-08T00:48:21,109 WARN [DataStreamer for file /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 block BP-791727382-172.17.0.2-1733618886417:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK], DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]) is bad. 2024-12-08T00:48:21,109 WARN [PacketResponder: BP-791727382-172.17.0.2-1733618886417:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:37879] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:21,110 WARN [PacketResponder: BP-791727382-172.17.0.2-1733618886417:blk_1073741832_1008, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:37879] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:21,110 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@331aadeb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:48:21,110 WARN [PacketResponder: BP-791727382-172.17.0.2-1733618886417:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:37879] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:48:21,111 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a7c2e3d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:48:21,111 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:48:21,111 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2082a925{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:48:21,111 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@474543c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/hadoop.log.dir/,STOPPED} 2024-12-08T00:48:21,111 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:41836 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37879:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41836 dst: /127.0.0.1:37879 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:48:21,112 WARN [BP-791727382-172.17.0.2-1733618886417 heartbeating to localhost/127.0.0.1:34853 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:48:21,112 WARN [BP-791727382-172.17.0.2-1733618886417 heartbeating to localhost/127.0.0.1:34853 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-791727382-172.17.0.2-1733618886417 (Datanode Uuid 94032d42-a92f-496f-a54d-179c92434f44) service to localhost/127.0.0.1:34853 2024-12-08T00:48:21,113 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:48:21,115 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5be1ef63{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:48:21,115 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data4/current/BP-791727382-172.17.0.2-1733618886417 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:48:21,115 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data3/current/BP-791727382-172.17.0.2-1733618886417 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:48:21,115 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1484286605_22 at /127.0.0.1:49566 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:45779:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49566 dst: /127.0.0.1:45779 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:48:21,115 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:49500 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:45779:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49500 dst: /127.0.0.1:45779 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:21,115 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:49528 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:45779:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49528 dst: /127.0.0.1:45779 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:48:21,115 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_7213670_22 at /127.0.0.1:41782 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37879:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41782 dst: /127.0.0.1:37879 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:21,115 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@16c96639{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:48:21,115 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:48:21,115 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1484286605_22 at /127.0.0.1:41864 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:37879:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41864 dst: /127.0.0.1:37879 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:21,115 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bca9002{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:48:21,115 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_7213670_22 at /127.0.0.1:49486 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:45779:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49486 dst: /127.0.0.1:45779 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:21,116 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@227fed5b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/hadoop.log.dir/,STOPPED} 2024-12-08T00:48:21,116 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-08T00:48:21,116 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:48:21,116 WARN [BP-791727382-172.17.0.2-1733618886417 heartbeating to localhost/127.0.0.1:34853 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:48:21,117 WARN [BP-791727382-172.17.0.2-1733618886417 heartbeating to localhost/127.0.0.1:34853 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-791727382-172.17.0.2-1733618886417 (Datanode Uuid e5a9bc8e-62a5-43a3-880f-00777e4e84dd) service to localhost/127.0.0.1:34853 2024-12-08T00:48:21,115 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:41812 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:37879:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41812 dst: /127.0.0.1:37879 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:21,117 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:48:21,117 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-08T00:48:21,117 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data1/current/BP-791727382-172.17.0.2-1733618886417 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:48:21,117 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:48:21,118 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data2/current/BP-791727382-172.17.0.2-1733618886417 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:48:21,122 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0., hostname=0f983e3e5be1,33993,1733618888281, seqNum=2] 2024-12-08T00:48:21,122 WARN [DataStreamer for file /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta block BP-791727382-172.17.0.2-1733618886417:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:21,122 WARN [DataStreamer for file /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/WALs/0f983e3e5be1,41445,1733618888143/0f983e3e5be1%2C41445%2C1733618888143.1733618888401 block BP-791727382-172.17.0.2-1733618886417:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:21,123 WARN [DataStreamer for file /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618888829 block BP-791727382-172.17.0.2-1733618886417:blk_1073741832_1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741832_1008 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:21,124 ERROR [FSHLog-0-hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956-prefix:0f983e3e5be1,33993,1733618888281 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:21,124 WARN [FSHLog-0-hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956-prefix:0f983e3e5be1,33993,1733618888281 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:21,124 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0f983e3e5be1%2C33993%2C1733618888281:(num 1733618888829) roll requested 2024-12-08T00:48:21,124 WARN [DataStreamer for file /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 block BP-791727382-172.17.0.2-1733618886417:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T00:48:21,124 INFO [regionserver/0f983e3e5be1:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C33993%2C1733618888281.1733618901124 2024-12-08T00:48:21,131 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:21,131 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:21,131 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:21,131 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:21,132 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:21,132 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618888829 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618901124 2024-12-08T00:48:21,132 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:21,132 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T00:48:21,132 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45933:45933),(127.0.0.1/127.0.0.1:46073:46073)] 2024-12-08T00:48:21,132 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618888829 is not closed yet, will try archiving it next time 2024-12-08T00:48:21,133 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-08T00:48:21,133 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-08T00:48:21,134 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618888829 2024-12-08T00:48:21,136 WARN [IPC Server handler 1 on default port 34853 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618888829 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741832_1008 2024-12-08T00:48:21,138 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618888829 after 3ms 2024-12-08T00:48:21,748 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:22,381 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T00:48:23,133 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:23,134 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618901124 2024-12-08T00:48:23,136 WARN [ResponseProcessor for block BP-791727382-172.17.0.2-1733618886417:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-791727382-172.17.0.2-1733618886417:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:23,136 WARN [DataStreamer for file /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618901124 block BP-791727382-172.17.0.2-1733618886417:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK], DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK]) is bad. 2024-12-08T00:48:23,137 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:55794 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:36435:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55794 dst: /127.0.0.1:36435 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:23,138 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:36482 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:33451:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36482 dst: /127.0.0.1:33451 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:48:23,177 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1fa52141{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:48:23,178 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7c6ba029{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:48:23,178 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:48:23,178 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2452cc37{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:48:23,178 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5aa86c35{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/hadoop.log.dir/,STOPPED} 2024-12-08T00:48:23,180 WARN [BP-791727382-172.17.0.2-1733618886417 heartbeating to localhost/127.0.0.1:34853 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:48:23,180 WARN [BP-791727382-172.17.0.2-1733618886417 heartbeating to localhost/127.0.0.1:34853 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-791727382-172.17.0.2-1733618886417 (Datanode Uuid e89283f3-cf6c-4c07-84c2-710371276ec2) service to localhost/127.0.0.1:34853 2024-12-08T00:48:23,180 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-08T00:48:23,181 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:48:23,181 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data5/current/BP-791727382-172.17.0.2-1733618886417 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:48:23,182 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data6/current/BP-791727382-172.17.0.2-1733618886417 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:48:23,182 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:48:23,749 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:24,381 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:25,134 WARN [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK]] 2024-12-08T00:48:25,135 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:25,135 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0f983e3e5be1%2C33993%2C1733618888281:(num 1733618901124) roll requested 2024-12-08T00:48:25,136 INFO [regionserver/0f983e3e5be1:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C33993%2C1733618888281.1733618905135 2024-12-08T00:48:25,140 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618888829 after 4006ms 2024-12-08T00:48:25,143 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:25,143 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK], DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]) is bad. 2024-12-08T00:48:25,143 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741839_1021 2024-12-08T00:48:25,145 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK] 2024-12-08T00:48:25,148 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:25,148 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK], DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK]) is bad. 2024-12-08T00:48:25,148 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741840_1022 2024-12-08T00:48:25,149 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK] 2024-12-08T00:48:25,150 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:25,150 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK], DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]) is bad. 2024-12-08T00:48:25,150 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741841_1023 2024-12-08T00:48:25,151 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK] 2024-12-08T00:48:25,155 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:25,155 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:25,155 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:25,155 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:25,155 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:25,156 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618901124 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618905135 2024-12-08T00:48:25,157 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46073:46073),(127.0.0.1/127.0.0.1:36397:36397)] 2024-12-08T00:48:25,157 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618888829 is not closed yet, will try archiving it next time 2024-12-08T00:48:25,157 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618901124 is not closed yet, will try archiving it next time 2024-12-08T00:48:25,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741838_1020 (size=2431) 2024-12-08T00:48:25,190 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-08T00:48:25,559 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): 
hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618888829 is not closed yet, will try archiving it next time 2024-12-08T00:48:25,750 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:26,382 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:27,157 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:27,196 WARN [ResponseProcessor for block BP-791727382-172.17.0.2-1733618886417:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-791727382-172.17.0.2-1733618886417:blk_1073741842_1024 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T00:48:27,196 WARN [DataStreamer for file /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618905135 block BP-791727382-172.17.0.2-1733618886417:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK], DatanodeInfoWithStorage[127.0.0.1:36109,DS-958b2e8f-e0e1-44dc-a03b-46f8f4aa8441,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK]) is bad. 2024-12-08T00:48:27,196 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:36490 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:33451:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36490 dst: /127.0.0.1:33451 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:48:27,197 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43806 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:36109:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43806 dst: /127.0.0.1:36109 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:27,198 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@33f41830{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:48:27,198 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c16d06a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:48:27,198 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:48:27,198 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74c0fccc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:48:27,198 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54916ccd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/hadoop.log.dir/,STOPPED} 2024-12-08T00:48:27,199 WARN [BP-791727382-172.17.0.2-1733618886417 heartbeating to localhost/127.0.0.1:34853 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:48:27,199 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T00:48:27,199 WARN [BP-791727382-172.17.0.2-1733618886417 heartbeating to localhost/127.0.0.1:34853 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-791727382-172.17.0.2-1733618886417 (Datanode Uuid a687aeef-559d-4266-ae5f-7c18617a39af) service to localhost/127.0.0.1:34853 2024-12-08T00:48:27,199 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:48:27,200 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data7/current/BP-791727382-172.17.0.2-1733618886417 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:48:27,200 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data8/current/BP-791727382-172.17.0.2-1733618886417 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:48:27,200 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:48:27,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] regionserver.HRegion(8855): Flush requested on 8eb80a1ef3b100de075d28e6732c96d0 2024-12-08T00:48:27,207 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8eb80a1ef3b100de075d28e6732c96d0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T00:48:27,227 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp/info/024b77978c2843a28ea6c9703e20f30a is 1080, key is row0002/info:/1733618903184/Put/seqid=0 2024-12-08T00:48:27,228 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T00:48:27,229 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK], DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]) is bad. 2024-12-08T00:48:27,229 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741843_1026 2024-12-08T00:48:27,229 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK] 2024-12-08T00:48:27,231 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36435 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:27,231 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43818 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741844_1027] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data10]'}, localName='127.0.0.1:36109', datanodeUuid='cab2c1f4-0b38-495b-81c5-dd7bc55561eb', xmitsInProgress=0}:Exception transferring block BP-791727382-172.17.0.2-1733618886417:blk_1073741844_1027 to mirror 127.0.0.1:36435 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:48:27,231 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36109,DS-958b2e8f-e0e1-44dc-a03b-46f8f4aa8441,DISK], DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK]) is bad. 2024-12-08T00:48:27,231 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741844_1027 2024-12-08T00:48:27,231 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43818 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741844_1027] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-08T00:48:27,232 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43818 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741844_1027] {}] datanode.DataXceiver(331): 127.0.0.1:36109:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43818 dst: /127.0.0.1:36109 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:27,232 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK] 2024-12-08T00:48:27,233 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T00:48:27,233 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK], DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]) is bad. 2024-12-08T00:48:27,233 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741845_1028 2024-12-08T00:48:27,234 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK] 2024-12-08T00:48:27,235 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:27,235 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK], DatanodeInfoWithStorage[127.0.0.1:36109,DS-958b2e8f-e0e1-44dc-a03b-46f8f4aa8441,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK]) is bad. 
2024-12-08T00:48:27,235 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741846_1029 2024-12-08T00:48:27,235 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK] 2024-12-08T00:48:27,236 WARN [IPC Server handler 1 on default port 34853 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-08T00:48:27,236 WARN [IPC Server handler 1 on default port 34853 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-08T00:48:27,236 WARN [IPC Server handler 1 on default port 34853 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-08T00:48:27,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741847_1030 (size=10347) 2024-12-08T00:48:27,640 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp/info/024b77978c2843a28ea6c9703e20f30a 2024-12-08T00:48:27,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp/info/024b77978c2843a28ea6c9703e20f30a as hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/024b77978c2843a28ea6c9703e20f30a 2024-12-08T00:48:27,652 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/024b77978c2843a28ea6c9703e20f30a, entries=5, sequenceid=11, filesize=10.1 K 2024-12-08T00:48:27,653 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 8eb80a1ef3b100de075d28e6732c96d0 in 446ms, sequenceid=11, compaction requested=false 2024-12-08T00:48:27,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
8eb80a1ef3b100de075d28e6732c96d0: 2024-12-08T00:48:27,750 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:27,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] regionserver.HRegion(8855): Flush requested on 8eb80a1ef3b100de075d28e6732c96d0 2024-12-08T00:48:27,834 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8eb80a1ef3b100de075d28e6732c96d0 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-12-08T00:48:27,841 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp/info/19cb33369ada4e769f05fc657a85049b is 1080, key is row0007/info:/1733618907208/Put/seqid=0 2024-12-08T00:48:27,844 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37879 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:27,844 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43832 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741848_1031] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data10]'}, localName='127.0.0.1:36109', datanodeUuid='cab2c1f4-0b38-495b-81c5-dd7bc55561eb', xmitsInProgress=0}:Exception transferring block BP-791727382-172.17.0.2-1733618886417:blk_1073741848_1031 to mirror 127.0.0.1:37879 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:27,844 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36109,DS-958b2e8f-e0e1-44dc-a03b-46f8f4aa8441,DISK], DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]) is bad. 2024-12-08T00:48:27,845 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741848_1031 2024-12-08T00:48:27,845 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43832 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741848_1031] {}] datanode.BlockReceiver(316): Block 1073741848 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-08T00:48:27,845 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43832 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741848_1031] {}] datanode.DataXceiver(331): 127.0.0.1:36109:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43832 dst: /127.0.0.1:36109 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:27,845 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK] 2024-12-08T00:48:27,847 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:27,847 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK], DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK]) is bad. 2024-12-08T00:48:27,847 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741849_1032 2024-12-08T00:48:27,847 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK] 2024-12-08T00:48:27,850 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45779 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:27,850 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43844 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data10]'}, localName='127.0.0.1:36109', datanodeUuid='cab2c1f4-0b38-495b-81c5-dd7bc55561eb', xmitsInProgress=0}:Exception transferring block BP-791727382-172.17.0.2-1733618886417:blk_1073741850_1033 to mirror 127.0.0.1:45779 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:27,850 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36109,DS-958b2e8f-e0e1-44dc-a03b-46f8f4aa8441,DISK], DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]) is bad. 2024-12-08T00:48:27,850 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741850_1033 2024-12-08T00:48:27,850 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43844 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-08T00:48:27,850 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43844 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:36109:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43844 dst: /127.0.0.1:36109 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:48:27,851 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK] 2024-12-08T00:48:27,853 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36435 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:27,853 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43852 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741851_1034] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data10]'}, localName='127.0.0.1:36109', datanodeUuid='cab2c1f4-0b38-495b-81c5-dd7bc55561eb', xmitsInProgress=0}:Exception transferring block BP-791727382-172.17.0.2-1733618886417:blk_1073741851_1034 to mirror 127.0.0.1:36435 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:27,853 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36109,DS-958b2e8f-e0e1-44dc-a03b-46f8f4aa8441,DISK], DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK]) is bad. 
2024-12-08T00:48:27,853 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741851_1034 2024-12-08T00:48:27,853 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43852 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741851_1034] {}] datanode.BlockReceiver(316): Block 1073741851 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-08T00:48:27,853 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43852 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741851_1034] {}] datanode.DataXceiver(331): 127.0.0.1:36109:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43852 dst: /127.0.0.1:36109 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:48:27,854 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK] 2024-12-08T00:48:27,855 WARN [IPC Server handler 1 on default port 34853 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-08T00:48:27,855 WARN [IPC Server handler 1 on default port 34853 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-08T00:48:27,855 WARN [IPC Server handler 1 on default port 34853 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-08T00:48:27,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741852_1035 (size=12506) 2024-12-08T00:48:28,121 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@a350071[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36109, datanodeUuid=cab2c1f4-0b38-495b-81c5-dd7bc55561eb, infoPort=36397, infoSecurePort=0, ipcPort=41757, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417):Failed to transfer BP-791727382-172.17.0.2-1733618886417:blk_1073741852_1035 to 127.0.0.1:36435 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:48:28,121 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2882550b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36109, datanodeUuid=cab2c1f4-0b38-495b-81c5-dd7bc55561eb, infoPort=36397, infoSecurePort=0, ipcPort=41757, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417):Failed to transfer BP-791727382-172.17.0.2-1733618886417:blk_1073741847_1030 to 127.0.0.1:45779 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:28,259 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp/info/19cb33369ada4e769f05fc657a85049b 2024-12-08T00:48:28,266 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp/info/19cb33369ada4e769f05fc657a85049b as hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/19cb33369ada4e769f05fc657a85049b 2024-12-08T00:48:28,274 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/19cb33369ada4e769f05fc657a85049b, entries=7, sequenceid=24, filesize=12.2 K 2024-12-08T00:48:28,275 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 8eb80a1ef3b100de075d28e6732c96d0 in 442ms, sequenceid=24, compaction requested=false 2024-12-08T00:48:28,276 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8eb80a1ef3b100de075d28e6732c96d0: 2024-12-08T00:48:28,276 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-12-08T00:48:28,276 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:48:28,276 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/19cb33369ada4e769f05fc657a85049b because midkey is the same as first or last row 2024-12-08T00:48:28,383 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:29,158 WARN [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36109,DS-958b2e8f-e0e1-44dc-a03b-46f8f4aa8441,DISK]] 2024-12-08T00:48:29,158 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:29,159 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0f983e3e5be1%2C33993%2C1733618888281:(num 1733618905135) roll requested 2024-12-08T00:48:29,160 INFO [regionserver/0f983e3e5be1:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C33993%2C1733618888281.1733618909159 2024-12-08T00:48:29,167 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:29,167 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK], DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]) is bad. 2024-12-08T00:48:29,167 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741853_1036 2024-12-08T00:48:29,168 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK] 2024-12-08T00:48:29,169 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:29,169 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK], DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK]) is bad. 2024-12-08T00:48:29,169 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741854_1037 2024-12-08T00:48:29,170 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK] 2024-12-08T00:48:29,171 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:29,172 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK], DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK]) is bad. 2024-12-08T00:48:29,172 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741855_1038 2024-12-08T00:48:29,172 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK] 2024-12-08T00:48:29,174 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:29,174 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK], DatanodeInfoWithStorage[127.0.0.1:36109,DS-958b2e8f-e0e1-44dc-a03b-46f8f4aa8441,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]) is bad. 
2024-12-08T00:48:29,174 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741856_1039 2024-12-08T00:48:29,175 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK] 2024-12-08T00:48:29,176 WARN [IPC Server handler 3 on default port 34853 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-08T00:48:29,176 WARN [IPC Server handler 3 on default port 34853 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-08T00:48:29,176 WARN [IPC Server handler 3 on default port 34853 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-08T00:48:29,179 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:29,179 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:29,179 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:29,179 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:29,179 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:29,179 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618905135 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618909159 2024-12-08T00:48:29,180 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36397:36397)] 2024-12-08T00:48:29,180 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618888829 is not closed yet, will try archiving it next time 2024-12-08T00:48:29,180 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618905135 is not closed yet, will try archiving it next time 2024-12-08T00:48:29,180 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618901124 to hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/oldWALs/0f983e3e5be1%2C33993%2C1733618888281.1733618901124 2024-12-08T00:48:29,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741842_1025 (size=25992) 2024-12-08T00:48:29,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] regionserver.HRegion(8855): Flush requested on 8eb80a1ef3b100de075d28e6732c96d0 2024-12-08T00:48:29,266 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8eb80a1ef3b100de075d28e6732c96d0 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-08T00:48:29,273 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp/info/1a657df4d64c4e6782ef675f1a8fb8ed is 1079, key is tmprow/info:/1733618909263/Put/seqid=0 2024-12-08T00:48:29,275 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:29,275 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK], DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK]) is bad. 2024-12-08T00:48:29,275 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741858_1041 2024-12-08T00:48:29,276 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK] 2024-12-08T00:48:29,277 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:29,277 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK], DatanodeInfoWithStorage[127.0.0.1:36109,DS-958b2e8f-e0e1-44dc-a03b-46f8f4aa8441,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK]) is bad. 2024-12-08T00:48:29,277 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741859_1042 2024-12-08T00:48:29,278 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK] 2024-12-08T00:48:29,279 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:29,279 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK], DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]) is bad. 2024-12-08T00:48:29,279 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741860_1043 2024-12-08T00:48:29,280 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK] 2024-12-08T00:48:29,282 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37879 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:29,282 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43872 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data10]'}, localName='127.0.0.1:36109', datanodeUuid='cab2c1f4-0b38-495b-81c5-dd7bc55561eb', xmitsInProgress=0}:Exception transferring block BP-791727382-172.17.0.2-1733618886417:blk_1073741861_1044 to mirror 127.0.0.1:37879 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:29,282 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36109,DS-958b2e8f-e0e1-44dc-a03b-46f8f4aa8441,DISK], DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]) is bad. 2024-12-08T00:48:29,282 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741861_1044 2024-12-08T00:48:29,282 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43872 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-12-08T00:48:29,283 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43872 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:36109:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43872 dst: /127.0.0.1:36109 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:29,283 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK] 2024-12-08T00:48:29,284 WARN [IPC Server handler 1 on default port 34853 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-08T00:48:29,284 WARN [IPC Server handler 1 on default port 34853 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-08T00:48:29,284 WARN [IPC Server handler 1 on default port 34853 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-08T00:48:29,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741862_1045 (size=6027) 2024-12-08T00:48:29,583 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618888829 is not closed yet, will try archiving it next time 2024-12-08T00:48:29,690 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB 
at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp/info/1a657df4d64c4e6782ef675f1a8fb8ed 2024-12-08T00:48:29,699 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp/info/1a657df4d64c4e6782ef675f1a8fb8ed as hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/1a657df4d64c4e6782ef675f1a8fb8ed 2024-12-08T00:48:29,705 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/1a657df4d64c4e6782ef675f1a8fb8ed, entries=1, sequenceid=34, filesize=5.9 K 2024-12-08T00:48:29,706 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 8eb80a1ef3b100de075d28e6732c96d0 in 441ms, sequenceid=34, compaction requested=true 2024-12-08T00:48:29,706 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8eb80a1ef3b100de075d28e6732c96d0: 2024-12-08T00:48:29,706 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-12-08T00:48:29,706 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:48:29,706 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/19cb33369ada4e769f05fc657a85049b because midkey is the same as first or last row 2024-12-08T00:48:29,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8eb80a1ef3b100de075d28e6732c96d0:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:48:29,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:48:29,707 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:48:29,708 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:48:29,709 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.HStore(1541): 8eb80a1ef3b100de075d28e6732c96d0/info is initiating minor compaction (all files) 2024-12-08T00:48:29,709 INFO [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8eb80a1ef3b100de075d28e6732c96d0/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0. 
2024-12-08T00:48:29,709 INFO [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/024b77978c2843a28ea6c9703e20f30a, hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/19cb33369ada4e769f05fc657a85049b, hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/1a657df4d64c4e6782ef675f1a8fb8ed] into tmpdir=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp, totalSize=28.2 K 2024-12-08T00:48:29,709 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] compactions.Compactor(225): Compacting 024b77978c2843a28ea6c9703e20f30a, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733618903184 2024-12-08T00:48:29,710 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] compactions.Compactor(225): Compacting 19cb33369ada4e769f05fc657a85049b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1733618907208 2024-12-08T00:48:29,710 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1a657df4d64c4e6782ef675f1a8fb8ed, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733618909263 2024-12-08T00:48:29,723 INFO [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8eb80a1ef3b100de075d28e6732c96d0#info#compaction#21 average throughput is 12.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:48:29,724 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp/info/017f5286c5e64f5cae412d722f8d4561 is 1080, key is row0002/info:/1733618903184/Put/seqid=0 2024-12-08T00:48:29,725 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T00:48:29,726 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK], DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]) is bad. 2024-12-08T00:48:29,726 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741863_1046 2024-12-08T00:48:29,726 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK] 2024-12-08T00:48:29,728 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36435 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:29,728 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43916 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741864_1047] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data10]'}, localName='127.0.0.1:36109', datanodeUuid='cab2c1f4-0b38-495b-81c5-dd7bc55561eb', xmitsInProgress=0}:Exception transferring block BP-791727382-172.17.0.2-1733618886417:blk_1073741864_1047 to mirror 127.0.0.1:36435 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:48:29,728 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36109,DS-958b2e8f-e0e1-44dc-a03b-46f8f4aa8441,DISK], DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK]) is bad. 2024-12-08T00:48:29,728 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741864_1047 2024-12-08T00:48:29,728 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43916 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741864_1047] {}] datanode.BlockReceiver(316): Block 1073741864 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-08T00:48:29,728 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43916 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741864_1047] {}] datanode.DataXceiver(331): 127.0.0.1:36109:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43916 dst: /127.0.0.1:36109 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:29,729 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK] 2024-12-08T00:48:29,730 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T00:48:29,730 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK], DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK]) is bad. 2024-12-08T00:48:29,730 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741865_1048 2024-12-08T00:48:29,731 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK] 2024-12-08T00:48:29,733 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37879 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:29,733 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43932 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data10]'}, localName='127.0.0.1:36109', datanodeUuid='cab2c1f4-0b38-495b-81c5-dd7bc55561eb', xmitsInProgress=0}:Exception transferring block BP-791727382-172.17.0.2-1733618886417:blk_1073741866_1049 to mirror 127.0.0.1:37879 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:48:29,733 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36109,DS-958b2e8f-e0e1-44dc-a03b-46f8f4aa8441,DISK], DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]) is bad. 2024-12-08T00:48:29,733 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741866_1049 2024-12-08T00:48:29,733 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43932 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-08T00:48:29,733 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43932 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:36109:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43932 dst: /127.0.0.1:36109 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:48:29,734 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]
2024-12-08T00:48:29,734 WARN [IPC Server handler 0 on default port 34853 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-08T00:48:29,734 WARN [IPC Server handler 0 on default port 34853 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-08T00:48:29,734 WARN [IPC Server handler 0 on default port 34853 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-08T00:48:29,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741867_1050 (size=17994)
2024-12-08T00:48:29,751 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-08T00:48:30,152 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp/info/017f5286c5e64f5cae412d722f8d4561 as hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/017f5286c5e64f5cae412d722f8d4561
2024-12-08T00:48:30,158 INFO [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8eb80a1ef3b100de075d28e6732c96d0/info of 8eb80a1ef3b100de075d28e6732c96d0 into 017f5286c5e64f5cae412d722f8d4561(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-08T00:48:30,158 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8eb80a1ef3b100de075d28e6732c96d0:
2024-12-08T00:48:30,158 INFO [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0., storeName=8eb80a1ef3b100de075d28e6732c96d0/info, priority=13, startTime=1733618909707; duration=0sec
2024-12-08T00:48:30,158 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K
2024-12-08T00:48:30,159 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-08T00:48:30,159 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/017f5286c5e64f5cae412d722f8d4561 because midkey is the same as first or last row
2024-12-08T00:48:30,159 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K
2024-12-08T00:48:30,159 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-08T00:48:30,159 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/017f5286c5e64f5cae412d722f8d4561 because midkey is the same as first or last row
2024-12-08T00:48:30,159 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K
2024-12-08T00:48:30,159 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-08T00:48:30,159 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/017f5286c5e64f5cae412d722f8d4561 because midkey is the same as first or last row
2024-12-08T00:48:30,159 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-08T00:48:30,159 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8eb80a1ef3b100de075d28e6732c96d0:info
2024-12-08T00:48:30,383 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:30,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] regionserver.HRegion(8855): Flush requested on 8eb80a1ef3b100de075d28e6732c96d0 2024-12-08T00:48:30,698 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8eb80a1ef3b100de075d28e6732c96d0 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-08T00:48:30,708 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp/info/d4f12b91364a478da9e5585debf7058a is 1079, key is tmprow/info:/1733618910695/Put/seqid=0 2024-12-08T00:48:30,709 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:30,710 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK], DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]) is bad. 2024-12-08T00:48:30,710 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741868_1051 2024-12-08T00:48:30,710 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK] 2024-12-08T00:48:30,711 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:30,712 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK], DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]) is bad. 2024-12-08T00:48:30,712 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741869_1052 2024-12-08T00:48:30,712 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK] 2024-12-08T00:48:30,714 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:30,714 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK], DatanodeInfoWithStorage[127.0.0.1:36109,DS-958b2e8f-e0e1-44dc-a03b-46f8f4aa8441,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK]) is bad. 2024-12-08T00:48:30,714 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741870_1053 2024-12-08T00:48:30,715 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK] 2024-12-08T00:48:30,716 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:30,716 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK], DatanodeInfoWithStorage[127.0.0.1:36109,DS-958b2e8f-e0e1-44dc-a03b-46f8f4aa8441,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK]) is bad. 2024-12-08T00:48:30,716 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741871_1054 2024-12-08T00:48:30,717 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK] 2024-12-08T00:48:30,717 WARN [IPC Server handler 2 on default port 34853 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-08T00:48:30,717 WARN [IPC Server handler 2 on default port 34853 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-08T00:48:30,717 WARN [IPC Server handler 2 on default port 34853 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-08T00:48:30,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741872_1055 (size=6027) 2024-12-08T00:48:31,107 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@a350071[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36109, datanodeUuid=cab2c1f4-0b38-495b-81c5-dd7bc55561eb, infoPort=36397, infoSecurePort=0, ipcPort=41757, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417):Failed to transfer BP-791727382-172.17.0.2-1733618886417:blk_1073741862_1045 to 127.0.0.1:33451 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:31,107 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2882550b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36109, datanodeUuid=cab2c1f4-0b38-495b-81c5-dd7bc55561eb, infoPort=36397, infoSecurePort=0, ipcPort=41757, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417):Failed to transfer BP-791727382-172.17.0.2-1733618886417:blk_1073741842_1025 to 127.0.0.1:36435 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:48:31,122 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp/info/d4f12b91364a478da9e5585debf7058a
2024-12-08T00:48:31,135 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp/info/d4f12b91364a478da9e5585debf7058a as hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/d4f12b91364a478da9e5585debf7058a
2024-12-08T00:48:31,142 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/d4f12b91364a478da9e5585debf7058a, entries=1, sequenceid=45, filesize=5.9 K
2024-12-08T00:48:31,144 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 8eb80a1ef3b100de075d28e6732c96d0 in 446ms, sequenceid=45, compaction requested=false
2024-12-08T00:48:31,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8eb80a1ef3b100de075d28e6732c96d0:
2024-12-08T00:48:31,144 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K
2024-12-08T00:48:31,144 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-08T00:48:31,144 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/017f5286c5e64f5cae412d722f8d4561 because midkey is the same as first or last row
2024-12-08T00:48:31,180 WARN [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36109,DS-958b2e8f-e0e1-44dc-a03b-46f8f4aa8441,DISK]]
2024-12-08T00:48:31,181 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-08T00:48:31,181 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0f983e3e5be1%2C33993%2C1733618888281:(num 1733618909159) roll requested 2024-12-08T00:48:31,181 INFO [regionserver/0f983e3e5be1:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C33993%2C1733618888281.1733618911181 2024-12-08T00:48:31,186 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:31,187 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK], DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]) is bad. 2024-12-08T00:48:31,187 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741873_1056 2024-12-08T00:48:31,188 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK] 2024-12-08T00:48:31,191 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T00:48:31,191 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK], DatanodeInfoWithStorage[127.0.0.1:36109,DS-958b2e8f-e0e1-44dc-a03b-46f8f4aa8441,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK]) is bad. 2024-12-08T00:48:31,191 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741874_1057 2024-12-08T00:48:31,192 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK] 2024-12-08T00:48:31,194 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:31,194 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK], DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK]) is bad. 2024-12-08T00:48:31,194 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741875_1058 2024-12-08T00:48:31,195 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK] 2024-12-08T00:48:31,196 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:31,196 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK], DatanodeInfoWithStorage[127.0.0.1:36109,DS-958b2e8f-e0e1-44dc-a03b-46f8f4aa8441,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]) is bad. 2024-12-08T00:48:31,196 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741876_1059 2024-12-08T00:48:31,196 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK] 2024-12-08T00:48:31,197 WARN [IPC Server handler 2 on default port 34853 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-08T00:48:31,197 WARN [IPC Server handler 2 on default port 34853 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-08T00:48:31,197 WARN [IPC Server handler 2 on default port 34853 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-08T00:48:31,199 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:31,200 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:31,200 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:31,200 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:31,200 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:31,200 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618909159 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618911181 2024-12-08T00:48:31,201 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36397:36397)] 2024-12-08T00:48:31,201 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618888829 is not closed yet, will try archiving it next time
2024-12-08T00:48:31,201 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618909159 is not closed yet, will try archiving it next time
2024-12-08T00:48:31,201 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618905135 to hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/oldWALs/0f983e3e5be1%2C33993%2C1733618888281.1733618905135
2024-12-08T00:48:31,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741857_1040 (size=13591)
2024-12-08T00:48:31,604 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618888829 is not closed yet, will try archiving it next time
2024-12-08T00:48:31,751 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-08T00:48:32,109 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2882550b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36109, datanodeUuid=cab2c1f4-0b38-495b-81c5-dd7bc55561eb, infoPort=36397, infoSecurePort=0, ipcPort=41757, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417):Failed to transfer BP-791727382-172.17.0.2-1733618886417:blk_1073741867_1050 to 127.0.0.1:33451 got java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:32,109 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@a350071[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36109, datanodeUuid=cab2c1f4-0b38-495b-81c5-dd7bc55561eb, infoPort=36397, infoSecurePort=0, ipcPort=41757, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417):Failed to transfer BP-791727382-172.17.0.2-1733618886417:blk_1073741872_1055 to 127.0.0.1:45779 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:32,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] regionserver.HRegion(8855): Flush requested on 8eb80a1ef3b100de075d28e6732c96d0 2024-12-08T00:48:32,135 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8eb80a1ef3b100de075d28e6732c96d0 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-08T00:48:32,147 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp/info/ae1965db5971462dba6354e23d6f7dbd is 1079, key is tmprow/info:/1733618912132/Put/seqid=0 2024-12-08T00:48:32,149 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T00:48:32,149 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK], DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK]) is bad. 2024-12-08T00:48:32,149 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741878_1061 2024-12-08T00:48:32,149 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK] 2024-12-08T00:48:32,152 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45779 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:32,152 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43280 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741879_1062] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data10]'}, localName='127.0.0.1:36109', datanodeUuid='cab2c1f4-0b38-495b-81c5-dd7bc55561eb', xmitsInProgress=0}:Exception transferring block BP-791727382-172.17.0.2-1733618886417:blk_1073741879_1062 to mirror 127.0.0.1:45779 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:48:32,152 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36109,DS-958b2e8f-e0e1-44dc-a03b-46f8f4aa8441,DISK], DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]) is bad. 2024-12-08T00:48:32,152 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741879_1062 2024-12-08T00:48:32,152 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43280 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741879_1062] {}] datanode.BlockReceiver(316): Block 1073741879 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-08T00:48:32,152 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43280 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741879_1062] {}] datanode.DataXceiver(331): 127.0.0.1:36109:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43280 dst: /127.0.0.1:36109 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:32,153 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK] 2024-12-08T00:48:32,155 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37879 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T00:48:32,155 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43282 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741880_1063] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data10]'}, localName='127.0.0.1:36109', datanodeUuid='cab2c1f4-0b38-495b-81c5-dd7bc55561eb', xmitsInProgress=0}:Exception transferring block BP-791727382-172.17.0.2-1733618886417:blk_1073741880_1063 to mirror 127.0.0.1:37879 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:32,155 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36109,DS-958b2e8f-e0e1-44dc-a03b-46f8f4aa8441,DISK], DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]) is bad. 2024-12-08T00:48:32,155 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741880_1063 2024-12-08T00:48:32,155 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43282 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741880_1063] {}] datanode.BlockReceiver(316): Block 1073741880 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-08T00:48:32,155 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43282 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741880_1063] {}] datanode.DataXceiver(331): 127.0.0.1:36109:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43282 dst: /127.0.0.1:36109 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:32,156 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK] 2024-12-08T00:48:32,158 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36435 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:32,158 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43296 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741881_1064] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data10]'}, localName='127.0.0.1:36109', datanodeUuid='cab2c1f4-0b38-495b-81c5-dd7bc55561eb', xmitsInProgress=0}:Exception transferring block BP-791727382-172.17.0.2-1733618886417:blk_1073741881_1064 to mirror 127.0.0.1:36435 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:48:32,159 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36109,DS-958b2e8f-e0e1-44dc-a03b-46f8f4aa8441,DISK], DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK]) is bad. 2024-12-08T00:48:32,159 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741881_1064 2024-12-08T00:48:32,159 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43296 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741881_1064] {}] datanode.BlockReceiver(316): Block 1073741881 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-08T00:48:32,159 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43296 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741881_1064] {}] datanode.DataXceiver(331): 127.0.0.1:36109:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43296 dst: /127.0.0.1:36109 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:48:32,160 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK] 2024-12-08T00:48:32,160 WARN [IPC Server handler 2 on default port 34853 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-08T00:48:32,160 WARN [IPC Server handler 2 on default port 34853 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-08T00:48:32,161 WARN [IPC Server handler 2 on default port 34853 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-08T00:48:32,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741882_1065 (size=6027) 2024-12-08T00:48:32,384 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T00:48:32,566 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp/info/ae1965db5971462dba6354e23d6f7dbd 2024-12-08T00:48:32,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp/info/ae1965db5971462dba6354e23d6f7dbd as hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/ae1965db5971462dba6354e23d6f7dbd 2024-12-08T00:48:32,586 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/ae1965db5971462dba6354e23d6f7dbd, entries=1, sequenceid=55, filesize=5.9 K 2024-12-08T00:48:32,587 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 8eb80a1ef3b100de075d28e6732c96d0 in 452ms, sequenceid=55, compaction requested=true 2024-12-08T00:48:32,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8eb80a1ef3b100de075d28e6732c96d0: 2024-12-08T00:48:32,588 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-12-08T00:48:32,588 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:48:32,588 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/017f5286c5e64f5cae412d722f8d4561 because midkey is the same as first or last row 2024-12-08T00:48:32,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8eb80a1ef3b100de075d28e6732c96d0:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:48:32,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:48:32,588 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:48:32,590 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:48:32,590 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.HStore(1541): 8eb80a1ef3b100de075d28e6732c96d0/info is initiating minor compaction (all files) 2024-12-08T00:48:32,590 INFO [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
8eb80a1ef3b100de075d28e6732c96d0/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0. 2024-12-08T00:48:32,590 INFO [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/017f5286c5e64f5cae412d722f8d4561, hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/d4f12b91364a478da9e5585debf7058a, hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/ae1965db5971462dba6354e23d6f7dbd] into tmpdir=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp, totalSize=29.3 K 2024-12-08T00:48:32,591 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] compactions.Compactor(225): Compacting 017f5286c5e64f5cae412d722f8d4561, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733618903184 2024-12-08T00:48:32,591 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] compactions.Compactor(225): Compacting d4f12b91364a478da9e5585debf7058a, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1733618910695 2024-12-08T00:48:32,592 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] compactions.Compactor(225): Compacting ae1965db5971462dba6354e23d6f7dbd, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733618912132 2024-12-08T00:48:32,609 INFO [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8eb80a1ef3b100de075d28e6732c96d0#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:48:32,609 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp/info/5edfe8c82b634e9584a131eecf25e118 is 1080, key is row0002/info:/1733618903184/Put/seqid=0 2024-12-08T00:48:32,611 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:32,611 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK], DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK]) is bad. 2024-12-08T00:48:32,611 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741883_1066 2024-12-08T00:48:32,612 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK] 2024-12-08T00:48:32,613 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:32,613 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK], DatanodeInfoWithStorage[127.0.0.1:36109,DS-958b2e8f-e0e1-44dc-a03b-46f8f4aa8441,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]) is bad. 2024-12-08T00:48:32,613 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741884_1067 2024-12-08T00:48:32,613 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37879,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK] 2024-12-08T00:48:32,615 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36435 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T00:48:32,615 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43316 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741885_1068] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data10]'}, localName='127.0.0.1:36109', datanodeUuid='cab2c1f4-0b38-495b-81c5-dd7bc55561eb', xmitsInProgress=0}:Exception transferring block BP-791727382-172.17.0.2-1733618886417:blk_1073741885_1068 to mirror 127.0.0.1:36435 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:32,615 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36109,DS-958b2e8f-e0e1-44dc-a03b-46f8f4aa8441,DISK], DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK]) is bad. 2024-12-08T00:48:32,615 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43316 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741885_1068] {}] datanode.BlockReceiver(316): Block 1073741885 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-08T00:48:32,615 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741885_1068 2024-12-08T00:48:32,615 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1208211810_22 at /127.0.0.1:43316 [Receiving block BP-791727382-172.17.0.2-1733618886417:blk_1073741885_1068] {}] datanode.DataXceiver(331): 127.0.0.1:36109:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43316 dst: /127.0.0.1:36109 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:32,616 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36435,DS-c01c0122-359b-4f15-840f-3c1bed6f09a4,DISK] 2024-12-08T00:48:32,617 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:32,617 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK], DatanodeInfoWithStorage[127.0.0.1:36109,DS-958b2e8f-e0e1-44dc-a03b-46f8f4aa8441,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]) is bad. 
2024-12-08T00:48:32,617 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741886_1069 2024-12-08T00:48:32,617 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK] 2024-12-08T00:48:32,618 WARN [IPC Server handler 2 on default port 34853 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-08T00:48:32,618 WARN [IPC Server handler 2 on default port 34853 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-08T00:48:32,618 WARN [IPC Server handler 2 on default port 34853 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-08T00:48:32,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741887_1070 (size=18097) 2024-12-08T00:48:33,037 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp/info/5edfe8c82b634e9584a131eecf25e118 as hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/5edfe8c82b634e9584a131eecf25e118 2024-12-08T00:48:33,044 INFO [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8eb80a1ef3b100de075d28e6732c96d0/info of 8eb80a1ef3b100de075d28e6732c96d0 into 5edfe8c82b634e9584a131eecf25e118(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:48:33,044 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8eb80a1ef3b100de075d28e6732c96d0: 2024-12-08T00:48:33,044 INFO [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0., storeName=8eb80a1ef3b100de075d28e6732c96d0/info, priority=13, startTime=1733618912588; duration=0sec 2024-12-08T00:48:33,044 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-12-08T00:48:33,044 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:48:33,044 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/5edfe8c82b634e9584a131eecf25e118 because midkey is the same as first or last row 2024-12-08T00:48:33,044 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-12-08T00:48:33,045 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:48:33,045 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/5edfe8c82b634e9584a131eecf25e118 because midkey is the same as first or last row 2024-12-08T00:48:33,045 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-12-08T00:48:33,045 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:48:33,045 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/5edfe8c82b634e9584a131eecf25e118 because midkey is the same as first or last row 2024-12-08T00:48:33,045 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:48:33,045 DEBUG [RS:0;0f983e3e5be1:33993-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8eb80a1ef3b100de075d28e6732c96d0:info 2024-12-08T00:48:33,201 WARN [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 
2024-12-08T00:48:33,201 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:33,382 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:48:33,385 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:48:33,386 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:48:33,386 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:48:33,386 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T00:48:33,386 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13a29b23{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:48:33,386 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f38666d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:48:33,476 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@358cd92{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/java.io.tmpdir/jetty-localhost-42129-hadoop-hdfs-3_4_1-tests_jar-_-any-9090847664316596107/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:48:33,476 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@207a6434{HTTP/1.1, (http/1.1)}{localhost:42129} 2024-12-08T00:48:33,476 INFO [Time-limited test {}] server.Server(415): Started @132906ms 2024-12-08T00:48:33,477 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T00:48:33,752 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:33,820 WARN [Thread-987 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T00:48:33,826 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6c056789cdb02879 with lease ID 0x3478e23613d90b4b: from storage DS-796f6687-3a76-48ad-a50d-1d0026d41077 node DatanodeRegistration(127.0.0.1:41113, datanodeUuid=94032d42-a92f-496f-a54d-179c92434f44, infoPort=43149, infoSecurePort=0, ipcPort=39465, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:48:33,827 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6c056789cdb02879 with lease ID 0x3478e23613d90b4b: from storage DS-9659f3c7-076f-4572-a003-e7d63b0b73ee node DatanodeRegistration(127.0.0.1:41113, datanodeUuid=94032d42-a92f-496f-a54d-179c92434f44, infoPort=43149, infoSecurePort=0, ipcPort=39465, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:48:34,108 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2882550b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36109, datanodeUuid=cab2c1f4-0b38-495b-81c5-dd7bc55561eb, infoPort=36397, infoSecurePort=0, ipcPort=41757, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417):Failed to transfer BP-791727382-172.17.0.2-1733618886417:blk_1073741882_1065 to 127.0.0.1:33451 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:34,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41113 is added to blk_1073741857_1040 (size=13591) 2024-12-08T00:48:34,384 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:35,110 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@a350071[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36109, datanodeUuid=cab2c1f4-0b38-495b-81c5-dd7bc55561eb, infoPort=36397, infoSecurePort=0, ipcPort=41757, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417):Failed to transfer BP-791727382-172.17.0.2-1733618886417:blk_1073741887_1070 to 127.0.0.1:36435 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:35,202 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:35,752 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:36,385 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:37,203 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:37,753 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:38,123 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-08T00:48:38,385 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:38,598 ERROR [FSHLog-0-hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData-prefix:0f983e3e5be1,41445,1733618888143 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:38,598 WARN [FSHLog-0-hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData-prefix:0f983e3e5be1,41445,1733618888143 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:38,599 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 0f983e3e5be1%2C41445%2C1733618888143:(num 1733618888401) roll requested 2024-12-08T00:48:38,600 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C41445%2C1733618888143.1733618918599 2024-12-08T00:48:38,608 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:38,609 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:38,609 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:38,609 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:38,609 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:38,609 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/WALs/0f983e3e5be1,41445,1733618888143/0f983e3e5be1%2C41445%2C1733618888143.1733618888401 with entries=54, filesize=26.65 KB; new WAL /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/WALs/0f983e3e5be1,41445,1733618888143/0f983e3e5be1%2C41445%2C1733618888143.1733618918599 2024-12-08T00:48:38,610 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:38,610 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:38,610 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/WALs/0f983e3e5be1,41445,1733618888143/0f983e3e5be1%2C41445%2C1733618888143.1733618888401 2024-12-08T00:48:38,610 WARN [IPC Server handler 2 on default port 34853 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/WALs/0f983e3e5be1,41445,1733618888143/0f983e3e5be1%2C41445%2C1733618888143.1733618888401 has not been closed. Lease recovery is in progress. RecoveryId = 1072 for block blk_1073741830_1006 2024-12-08T00:48:38,611 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/WALs/0f983e3e5be1,41445,1733618888143/0f983e3e5be1%2C41445%2C1733618888143.1733618888401 after 1ms 2024-12-08T00:48:38,611 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36397:36397),(127.0.0.1/127.0.0.1:43149:43149)] 2024-12-08T00:48:38,611 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/WALs/0f983e3e5be1,41445,1733618888143/0f983e3e5be1%2C41445%2C1733618888143.1733618888401 is not closed yet, will try archiving it next time 2024-12-08T00:48:39,204 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:39,753 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:41,204 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:41,754 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:42,613 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/WALs/0f983e3e5be1,41445,1733618888143/0f983e3e5be1%2C41445%2C1733618888143.1733618888401 after 4003ms 2024-12-08T00:48:43,205 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:43,755 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:43,828 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4a53e685[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41113, datanodeUuid=94032d42-a92f-496f-a54d-179c92434f44, infoPort=43149, infoSecurePort=0, ipcPort=39465, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417):Failed to transfer BP-791727382-172.17.0.2-1733618886417:blk_1073741835_1011 to 127.0.0.1:33451 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:43,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741833_1009 (size=32) 2024-12-08T00:48:43,842 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7be298e1 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-791727382-172.17.0.2-1733618886417:blk_1073741832_1008, datanode=DatanodeInfoWithStorage[127.0.0.1:45779,null,null]) java.net.ConnectException: Call From 0f983e3e5be1/172.17.0.2 to localhost:42159 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-12-08T00:48:43,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41113 is added to blk_1073741832_1019 (size=455) 2024-12-08T00:48:44,168 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618888829 to hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/oldWALs/0f983e3e5be1%2C33993%2C1733618888281.1733618888829 2024-12-08T00:48:44,172 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618909159 to hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/oldWALs/0f983e3e5be1%2C33993%2C1733618888281.1733618909159 2024-12-08T00:48:44,828 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4a53e685[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41113, datanodeUuid=94032d42-a92f-496f-a54d-179c92434f44, infoPort=43149, infoSecurePort=0, ipcPort=39465, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417):Failed to transfer BP-791727382-172.17.0.2-1733618886417:blk_1073741829_1005 to 127.0.0.1:33451 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:44,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741831_1007 (size=1321) 2024-12-08T00:48:45,206 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:45,755 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:46,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741826_1002 (size=42) 2024-12-08T00:48:46,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741832_1019 (size=455) 2024-12-08T00:48:46,994 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C33993%2C1733618888281.1733618926994 2024-12-08T00:48:47,005 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:47,005 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:47,005 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:47,005 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:47,005 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:47,006 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618911181 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618926994 2024-12-08T00:48:47,007 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43149:43149),(127.0.0.1/127.0.0.1:36397:36397)] 2024-12-08T00:48:47,007 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618911181 is not closed yet, will try archiving it next time 2024-12-08T00:48:47,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741877_1060 (size=12911) 2024-12-08T00:48:47,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] regionserver.HRegion(8855): Flush requested on 8eb80a1ef3b100de075d28e6732c96d0 2024-12-08T00:48:47,011 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8eb80a1ef3b100de075d28e6732c96d0 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-08T00:48:47,016 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp/info/7a563f61241f424b802a048854ac6b05 is 1080, key is row0013/info:/1733618927008/Put/seqid=0 2024-12-08T00:48:47,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741890_1074 (size=8190) 2024-12-08T00:48:47,028 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41113 is added to blk_1073741890_1074 (size=8190) 2024-12-08T00:48:47,029 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp/info/7a563f61241f424b802a048854ac6b05 2024-12-08T00:48:47,035 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp/info/7a563f61241f424b802a048854ac6b05 as hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/7a563f61241f424b802a048854ac6b05 2024-12-08T00:48:47,041 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/7a563f61241f424b802a048854ac6b05, entries=3, sequenceid=66, filesize=8.0 K 2024-12-08T00:48:47,042 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for 8eb80a1ef3b100de075d28e6732c96d0 in 31ms, sequenceid=66, compaction requested=false 2024-12-08T00:48:47,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8eb80a1ef3b100de075d28e6732c96d0: 2024-12-08T00:48:47,042 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-12-08T00:48:47,042 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:48:47,042 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/5edfe8c82b634e9584a131eecf25e118 because midkey is the same as first or last row 2024-12-08T00:48:47,206 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-12-08T00:48:47,206 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
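The repeated java.io.IOException: All datanodes [...] are bad. Aborting... entries above are the HDFS write pipeline giving up after every datanode in the WAL's pipeline has been marked bad; whether the client first tries to swap in a replacement datanode is governed by the dfs.client.block.write.replace-datanode-on-failure.* client settings. Below is a minimal, illustrative sketch of a client write against the NameNode address seen in this log (localhost:34853); the settings shown are an assumption for illustration, not the configuration this test used.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PipelineFailureSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:34853"); // NameNode port taken from the log above
    // Illustrative settings: keep writing best-effort when no replacement datanode is available.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    try (FileSystem fs = FileSystem.get(conf);
         FSDataOutputStream out = fs.create(new Path("/tmp/pipeline-probe"))) {
      out.writeBytes("probe");
      out.hsync(); // pushes the bytes through the datanode pipeline, like a WAL sync
    } catch (IOException e) {
      // With no healthy datanodes left in the pipeline, this is where
      // "All datanodes [...] are bad. Aborting..." surfaces to the caller.
      System.err.println("pipeline write failed: " + e.getMessage());
    }
  }
}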
2024-12-08T00:48:47,230 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T00:48:47,230 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T00:48:47,231 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:48:47,231 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:48:47,231 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-12-08T00:48:47,231 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-08T00:48:47,232 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T00:48:47,232 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1609523067, stopped=false 2024-12-08T00:48:47,232 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0f983e3e5be1,41445,1733618888143 2024-12-08T00:48:47,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33993-0x10002f2ac5a0001, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T00:48:47,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T00:48:47,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46325-0x10002f2ac5a0002, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T00:48:47,296 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T00:48:47,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:47,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46325-0x10002f2ac5a0002, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:47,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33993-0x10002f2ac5a0001, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:47,296 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-08T00:48:47,297 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:48:47,298 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:48:47,298 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '0f983e3e5be1,33993,1733618888281' ***** 2024-12-08T00:48:47,298 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:48:47,298 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46325-0x10002f2ac5a0002, quorum=127.0.0.1:53241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:48:47,298 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T00:48:47,298 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0f983e3e5be1,46325,1733618889648' ***** 2024-12-08T00:48:47,299 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T00:48:47,299 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33993-0x10002f2ac5a0001, quorum=127.0.0.1:53241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:48:47,299 INFO [RS:0;0f983e3e5be1:33993 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T00:48:47,299 INFO [RS:1;0f983e3e5be1:46325 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T00:48:47,299 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T00:48:47,299 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T00:48:47,299 INFO [RS:0;0f983e3e5be1:33993 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T00:48:47,299 INFO [RS:0;0f983e3e5be1:33993 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T00:48:47,300 INFO [RS:0;0f983e3e5be1:33993 {}] regionserver.HRegionServer(3091): Received CLOSE for 8eb80a1ef3b100de075d28e6732c96d0 2024-12-08T00:48:47,300 INFO [RS:1;0f983e3e5be1:46325 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T00:48:47,300 INFO [RS:1;0f983e3e5be1:46325 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T00:48:47,300 INFO [RS:1;0f983e3e5be1:46325 {}] regionserver.HRegionServer(959): stopping server 0f983e3e5be1,46325,1733618889648 2024-12-08T00:48:47,300 INFO [RS:0;0f983e3e5be1:33993 {}] regionserver.HRegionServer(959): stopping server 0f983e3e5be1,33993,1733618888281 2024-12-08T00:48:47,300 INFO [RS:1;0f983e3e5be1:46325 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T00:48:47,300 INFO [RS:0;0f983e3e5be1:33993 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T00:48:47,300 INFO [RS:0;0f983e3e5be1:33993 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0f983e3e5be1:33993. 2024-12-08T00:48:47,300 INFO [RS:1;0f983e3e5be1:46325 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;0f983e3e5be1:46325. 
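The shutdown above is driven from the test's tearDown: the call stacks earlier in the log name AbstractTestLogRolling.tearDown invoking HBaseTestingUtil.shutdownMiniCluster, which closes the shared connection, requests cluster shutdown from the master, and stops both region servers. A minimal sketch of that teardown shape follows; the TEST_UTIL field and class name are hypothetical and the utility would normally be created during test setup rather than inline.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class LogRollingTeardownSketch {
  // In the real test the utility is created during setup; a fresh instance
  // here only keeps the sketch self-contained.
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Stops the master and region servers and shuts down the backing
    // HDFS/ZooKeeper miniclusters, producing the STOPPING/STOPPED
    // sequence visible in the log above.
    TEST_UTIL.shutdownMiniCluster();
  }
}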
2024-12-08T00:48:47,300 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 8eb80a1ef3b100de075d28e6732c96d0, disabling compactions & flushes 2024-12-08T00:48:47,301 DEBUG [RS:1;0f983e3e5be1:46325 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:48:47,301 DEBUG [RS:0;0f983e3e5be1:33993 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:48:47,301 DEBUG [RS:1;0f983e3e5be1:46325 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:48:47,301 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0. 
2024-12-08T00:48:47,301 DEBUG [RS:0;0f983e3e5be1:33993 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:48:47,301 INFO [RS:1;0f983e3e5be1:46325 {}] regionserver.HRegionServer(976): stopping server 0f983e3e5be1,46325,1733618889648; all regions closed. 2024-12-08T00:48:47,301 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0. 2024-12-08T00:48:47,301 INFO [RS:0;0f983e3e5be1:33993 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T00:48:47,301 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0. after waiting 0 ms 2024-12-08T00:48:47,301 INFO [RS:0;0f983e3e5be1:33993 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T00:48:47,301 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0. 2024-12-08T00:48:47,301 INFO [RS:0;0f983e3e5be1:33993 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T00:48:47,301 INFO [RS:0;0f983e3e5be1:33993 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T00:48:47,301 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 8eb80a1ef3b100de075d28e6732c96d0 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-12-08T00:48:47,302 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:47,302 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:47,302 INFO [RS:0;0f983e3e5be1:33993 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-08T00:48:47,302 DEBUG [RS:0;0f983e3e5be1:33993 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 8eb80a1ef3b100de075d28e6732c96d0=TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0.} 2024-12-08T00:48:47,302 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:47,302 DEBUG [RS:0;0f983e3e5be1:33993 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 8eb80a1ef3b100de075d28e6732c96d0 2024-12-08T00:48:47,302 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:47,302 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T00:48:47,302 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:47,302 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T00:48:47,302 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T00:48:47,302 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T00:48:47,302 DEBUG 
[RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T00:48:47,303 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-12-08T00:48:47,303 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:47,303 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:47,303 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 2024-12-08T00:48:47,303 ERROR [FSHLog-0-hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956-prefix:0f983e3e5be1,33993,1733618888281.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T00:48:47,303 WARN [FSHLog-0-hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956-prefix:0f983e3e5be1,33993,1733618888281.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:47,303 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0f983e3e5be1%2C33993%2C1733618888281.meta:.meta(num 1733618889450) roll requested 2024-12-08T00:48:47,303 INFO [regionserver/0f983e3e5be1:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C33993%2C1733618888281.meta.1733618927303.meta 2024-12-08T00:48:47,303 WARN [IPC Server handler 2 on default port 34853 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 has not been closed. Lease recovery is in progress. RecoveryId = 1075 for block blk_1073741837_1013 2024-12-08T00:48:47,304 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 after 1ms 2024-12-08T00:48:47,308 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp/info/0248ce0d4337485494111e2e68197e42 is 1080, key is row0015/info:/1733618927012/Put/seqid=0 2024-12-08T00:48:47,315 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:47,315 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:47,315 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:47,315 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:47,315 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:47,316 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618927303.meta 2024-12-08T00:48:47,316 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:47,316 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45779,DS-6e4fd87f-9c96-44ad-9c61-d3c5cf3c11c8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:47,316 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta 2024-12-08T00:48:47,317 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43149:43149),(127.0.0.1/127.0.0.1:36397:36397)] 2024-12-08T00:48:47,317 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta is not closed yet, will try archiving it next time 2024-12-08T00:48:47,317 WARN [IPC Server handler 4 on default port 34853 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1078 for block blk_1073741834_1010 2024-12-08T00:48:47,317 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta after 1ms 2024-12-08T00:48:47,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41113 is added to blk_1073741892_1077 (size=14660) 2024-12-08T00:48:47,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741892_1077 (size=14660) 2024-12-08T00:48:47,318 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp/info/0248ce0d4337485494111e2e68197e42 2024-12-08T00:48:47,324 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/.tmp/info/0248ce0d4337485494111e2e68197e42 as hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/0248ce0d4337485494111e2e68197e42 2024-12-08T00:48:47,329 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/0248ce0d4337485494111e2e68197e42, entries=9, sequenceid=78, filesize=14.3 K 2024-12-08T00:48:47,330 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 8eb80a1ef3b100de075d28e6732c96d0 in 29ms, sequenceid=78, compaction requested=true 2024-12-08T00:48:47,331 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/024b77978c2843a28ea6c9703e20f30a, hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/19cb33369ada4e769f05fc657a85049b, hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/017f5286c5e64f5cae412d722f8d4561, hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/1a657df4d64c4e6782ef675f1a8fb8ed, 
hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/d4f12b91364a478da9e5585debf7058a, hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/ae1965db5971462dba6354e23d6f7dbd] to archive 2024-12-08T00:48:47,332 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T00:48:47,332 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/hbase/meta/1588230740/.tmp/info/5d1ad98ab0ea48e1b71547eecd381032 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0./info:regioninfo/1733618890148/Put/seqid=0 2024-12-08T00:48:47,334 WARN [Thread-1049 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1079 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:48:47,334 WARN [Thread-1049 {}] hdfs.DataStreamer(1731): Error Recovery for BP-791727382-172.17.0.2-1733618886417:blk_1073741893_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK], DatanodeInfoWithStorage[127.0.0.1:41113,DS-796f6687-3a76-48ad-a50d-1d0026d41077,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK]) is bad. 
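The RecoverLeaseFSUtils entries above ("Recover lease on dfs file ...", "Failed to recover lease, attempt=0 ... after 1ms") together with the NameNode's "Lease recovery is in progress" warning show the usual pattern: ask the NameNode to recover the lease on the abandoned WAL, then poll until the file is closed. A minimal sketch of that pattern against a DistributedFileSystem handle is below; the method and parameter names are illustrative, not HBase's implementation.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  // Returns true once the lease is released and the file is closed,
  // false if the retry budget runs out first.
  static boolean recoverLease(DistributedFileSystem dfs, Path walFile,
                              int maxAttempts, long pauseMs) throws Exception {
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      if (dfs.recoverLease(walFile)) {
        return true; // NameNode closed the file immediately
      }
      if (dfs.isFileClosed(walFile)) {
        return true; // recovery completed between the call above and this check
      }
      Thread.sleep(pauseMs); // recovery still in progress on the NameNode; wait and retry
    }
    return false;
  }
}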
2024-12-08T00:48:47,334 WARN [Thread-1049 {}] hdfs.DataStreamer(1850): Abandoning BP-791727382-172.17.0.2-1733618886417:blk_1073741893_1079 2024-12-08T00:48:47,334 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/024b77978c2843a28ea6c9703e20f30a to hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/024b77978c2843a28ea6c9703e20f30a 2024-12-08T00:48:47,334 WARN [Thread-1049 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33451,DS-fb1c61b5-07ec-4bb6-9b70-43af2b3fbbb9,DISK] 2024-12-08T00:48:47,336 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/19cb33369ada4e769f05fc657a85049b to hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/19cb33369ada4e769f05fc657a85049b 2024-12-08T00:48:47,337 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/017f5286c5e64f5cae412d722f8d4561 to hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/017f5286c5e64f5cae412d722f8d4561 2024-12-08T00:48:47,338 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/1a657df4d64c4e6782ef675f1a8fb8ed to hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/1a657df4d64c4e6782ef675f1a8fb8ed 2024-12-08T00:48:47,340 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/d4f12b91364a478da9e5585debf7058a to hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/d4f12b91364a478da9e5585debf7058a 2024-12-08T00:48:47,341 DEBUG 
[StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/ae1965db5971462dba6354e23d6f7dbd to hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/info/ae1965db5971462dba6354e23d6f7dbd 2024-12-08T00:48:47,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41113 is added to blk_1073741894_1080 (size=7089) 2024-12-08T00:48:47,342 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=0f983e3e5be1:41445 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-12-08T00:48:47,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741894_1080 (size=7089) 2024-12-08T00:48:47,342 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [024b77978c2843a28ea6c9703e20f30a=10347, 19cb33369ada4e769f05fc657a85049b=12506, 017f5286c5e64f5cae412d722f8d4561=17994, 1a657df4d64c4e6782ef675f1a8fb8ed=6027, d4f12b91364a478da9e5585debf7058a=6027, ae1965db5971462dba6354e23d6f7dbd=6027] 2024-12-08T00:48:47,343 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/hbase/meta/1588230740/.tmp/info/5d1ad98ab0ea48e1b71547eecd381032 2024-12-08T00:48:47,346 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb80a1ef3b100de075d28e6732c96d0/recovered.edits/81.seqid, newMaxSeqId=81, maxSeqId=1 2024-12-08T00:48:47,347 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0. 
2024-12-08T00:48:47,347 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 8eb80a1ef3b100de075d28e6732c96d0: Waiting for close lock at 1733618927300Running coprocessor pre-close hooks at 1733618927300Disabling compacts and flushes for region at 1733618927300Disabling writes for close at 1733618927301 (+1 ms)Obtaining lock to block concurrent updates at 1733618927302 (+1 ms)Preparing flush snapshotting stores in 8eb80a1ef3b100de075d28e6732c96d0 at 1733618927302Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0., syncing WAL and waiting on mvcc, flushsize=dataSize=9683, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1733618927302Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0. at 1733618927303 (+1 ms)Flushing 8eb80a1ef3b100de075d28e6732c96d0/info: creating writer at 1733618927303Flushing 8eb80a1ef3b100de075d28e6732c96d0/info: appending metadata at 1733618927308 (+5 ms)Flushing 8eb80a1ef3b100de075d28e6732c96d0/info: closing flushed file at 1733618927308Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2d5452ec: reopening flushed file at 1733618927323 (+15 ms)Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 8eb80a1ef3b100de075d28e6732c96d0 in 29ms, sequenceid=78, compaction requested=true at 1733618927330 (+7 ms)Writing region close event to WAL at 1733618927343 (+13 ms)Running coprocessor post-close hooks at 1733618927347 (+4 ms)Closed at 1733618927347 2024-12-08T00:48:47,347 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733618889776.8eb80a1ef3b100de075d28e6732c96d0. 
2024-12-08T00:48:47,362 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/hbase/meta/1588230740/.tmp/ns/af2ee7f6935b432c93bd83e236192125 is 43, key is default/ns:d/1733618889550/Put/seqid=0 2024-12-08T00:48:47,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741895_1081 (size=5153) 2024-12-08T00:48:47,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41113 is added to blk_1073741895_1081 (size=5153) 2024-12-08T00:48:47,367 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/hbase/meta/1588230740/.tmp/ns/af2ee7f6935b432c93bd83e236192125 2024-12-08T00:48:47,392 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/hbase/meta/1588230740/.tmp/table/8573df57a7394363b9e1fb3ef3ae78e0 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1733618890159/Put/seqid=0 2024-12-08T00:48:47,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741896_1082 (size=5424) 2024-12-08T00:48:47,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41113 is added to blk_1073741896_1082 (size=5424) 2024-12-08T00:48:47,397 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/hbase/meta/1588230740/.tmp/table/8573df57a7394363b9e1fb3ef3ae78e0 2024-12-08T00:48:47,404 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/hbase/meta/1588230740/.tmp/info/5d1ad98ab0ea48e1b71547eecd381032 as hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/hbase/meta/1588230740/info/5d1ad98ab0ea48e1b71547eecd381032 2024-12-08T00:48:47,409 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.1733618911181 to hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/oldWALs/0f983e3e5be1%2C33993%2C1733618888281.1733618911181 2024-12-08T00:48:47,410 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/hbase/meta/1588230740/info/5d1ad98ab0ea48e1b71547eecd381032, entries=10, sequenceid=11, filesize=6.9 K 2024-12-08T00:48:47,411 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/hbase/meta/1588230740/.tmp/ns/af2ee7f6935b432c93bd83e236192125 as hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/hbase/meta/1588230740/ns/af2ee7f6935b432c93bd83e236192125 2024-12-08T00:48:47,417 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/hbase/meta/1588230740/ns/af2ee7f6935b432c93bd83e236192125, entries=2, sequenceid=11, filesize=5.0 K 2024-12-08T00:48:47,418 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/hbase/meta/1588230740/.tmp/table/8573df57a7394363b9e1fb3ef3ae78e0 as hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/hbase/meta/1588230740/table/8573df57a7394363b9e1fb3ef3ae78e0 2024-12-08T00:48:47,424 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/hbase/meta/1588230740/table/8573df57a7394363b9e1fb3ef3ae78e0, entries=2, sequenceid=11, filesize=5.3 K 2024-12-08T00:48:47,425 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 123ms, sequenceid=11, compaction requested=false 2024-12-08T00:48:47,432 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-08T00:48:47,433 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T00:48:47,433 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T00:48:47,433 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733618927302Running coprocessor pre-close hooks at 1733618927302Disabling compacts and flushes for region at 1733618927302Disabling writes for close at 1733618927302Obtaining lock to block concurrent updates at 1733618927303 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733618927303Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1733618927303Flushing stores of hbase:meta,,1.1588230740 at 1733618927317 (+14 ms)Flushing 1588230740/info: creating writer at 1733618927318 (+1 ms)Flushing 1588230740/info: appending metadata at 1733618927332 (+14 ms)Flushing 1588230740/info: closing flushed file at 1733618927332Flushing 1588230740/ns: creating writer at 1733618927348 (+16 ms)Flushing 1588230740/ns: appending metadata at 1733618927362 (+14 ms)Flushing 1588230740/ns: closing flushed file at 
1733618927362Flushing 1588230740/table: creating writer at 1733618927373 (+11 ms)Flushing 1588230740/table: appending metadata at 1733618927391 (+18 ms)Flushing 1588230740/table: closing flushed file at 1733618927391Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@31050c2: reopening flushed file at 1733618927403 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5ba81402: reopening flushed file at 1733618927410 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b43f6ce: reopening flushed file at 1733618927417 (+7 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 123ms, sequenceid=11, compaction requested=false at 1733618927425 (+8 ms)Writing region close event to WAL at 1733618927428 (+3 ms)Running coprocessor post-close hooks at 1733618927433 (+5 ms)Closed at 1733618927433 2024-12-08T00:48:47,433 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T00:48:47,502 INFO [RS:0;0f983e3e5be1:33993 {}] regionserver.HRegionServer(976): stopping server 0f983e3e5be1,33993,1733618888281; all regions closed. 2024-12-08T00:48:47,503 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:47,503 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:47,503 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:47,504 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:47,504 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:47,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741891_1076 (size=825) 2024-12-08T00:48:47,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41113 is added to blk_1073741891_1076 (size=825) 2024-12-08T00:48:47,734 INFO [regionserver/0f983e3e5be1:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-08T00:48:47,735 INFO [regionserver/0f983e3e5be1:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-08T00:48:47,751 INFO [regionserver/0f983e3e5be1:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:48:47,824 INFO [regionserver/0f983e3e5be1:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-08T00:48:47,824 INFO [regionserver/0f983e3e5be1:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-08T00:48:48,679 INFO [regionserver/0f983e3e5be1:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:48:49,111 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@a350071[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36109, datanodeUuid=cab2c1f4-0b38-495b-81c5-dd7bc55561eb, infoPort=36397, infoSecurePort=0, ipcPort=41757, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417):Failed to transfer BP-791727382-172.17.0.2-1733618886417:blk_1073741877_1060 to 127.0.0.1:33451 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:49,568 INFO [master/0f983e3e5be1:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-08T00:48:49,568 INFO [master/0f983e3e5be1:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-08T00:48:49,828 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7407cc5f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41113, datanodeUuid=94032d42-a92f-496f-a54d-179c92434f44, infoPort=43149, infoSecurePort=0, ipcPort=39465, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417):Failed to transfer BP-791727382-172.17.0.2-1733618886417:blk_1073741825_1001 to 127.0.0.1:33451 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:49,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741827_1003 (size=196) 2024-12-08T00:48:50,829 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4a53e685[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41113, datanodeUuid=94032d42-a92f-496f-a54d-179c92434f44, infoPort=43149, infoSecurePort=0, ipcPort=39465, storageInfo=lv=-57;cid=testClusterID;nsid=1708086423;c=1733618886417):Failed to transfer BP-791727382-172.17.0.2-1733618886417:blk_1073741836_1012 to 127.0.0.1:33451 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:50,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741828_1004 (size=1189) 2024-12-08T00:48:51,306 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 after 4003ms 2024-12-08T00:48:51,353 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta after 4037ms 2024-12-08T00:48:52,303 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-08T00:48:52,309 DEBUG [RS:1;0f983e3e5be1:46325 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/oldWALs 2024-12-08T00:48:52,309 INFO [RS:1;0f983e3e5be1:46325 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0f983e3e5be1%2C46325%2C1733618889648:(num 1733618889880) 2024-12-08T00:48:52,309 DEBUG [RS:1;0f983e3e5be1:46325 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:48:52,310 INFO [RS:1;0f983e3e5be1:46325 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:48:52,310 INFO [RS:1;0f983e3e5be1:46325 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T00:48:52,310 INFO [RS:1;0f983e3e5be1:46325 {}] hbase.ChoreService(370): Chore service for: regionserver/0f983e3e5be1:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T00:48:52,310 INFO [RS:1;0f983e3e5be1:46325 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T00:48:52,311 INFO [RS:1;0f983e3e5be1:46325 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T00:48:52,311 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T00:48:52,311 INFO [RS:1;0f983e3e5be1:46325 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T00:48:52,311 INFO [RS:1;0f983e3e5be1:46325 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T00:48:52,311 INFO [RS:1;0f983e3e5be1:46325 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46325 2024-12-08T00:48:52,349 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:48:52,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T00:48:52,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46325-0x10002f2ac5a0002, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0f983e3e5be1,46325,1733618889648 2024-12-08T00:48:52,353 INFO [RS:1;0f983e3e5be1:46325 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T00:48:52,354 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0f983e3e5be1,46325,1733618889648] 2024-12-08T00:48:52,372 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0f983e3e5be1,46325,1733618889648 already deleted, retry=false 2024-12-08T00:48:52,372 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0f983e3e5be1,46325,1733618889648 expired; onlineServers=1 2024-12-08T00:48:52,375 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:48:52,375 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:48:52,376 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:48:52,376 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:48:52,376 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:48:52,384 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:48:52,384 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:48:52,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46325-0x10002f2ac5a0002, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:48:52,464 INFO [RS:1;0f983e3e5be1:46325 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T00:48:52,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46325-0x10002f2ac5a0002, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:48:52,464 INFO [RS:1;0f983e3e5be1:46325 {}] regionserver.HRegionServer(1031): Exiting; stopping=0f983e3e5be1,46325,1733618889648; zookeeper connection closed. 
2024-12-08T00:48:52,465 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@64ba2492 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@64ba2492 2024-12-08T00:48:52,505 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-08T00:48:52,512 DEBUG [RS:0;0f983e3e5be1:33993 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/oldWALs 2024-12-08T00:48:52,512 INFO [RS:0;0f983e3e5be1:33993 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0f983e3e5be1%2C33993%2C1733618888281.meta:.meta(num 1733618927303) 2024-12-08T00:48:52,513 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:52,513 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:52,513 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:52,513 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:52,513 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:52,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741889_1073 (size=14682) 2024-12-08T00:48:52,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41113 is added to blk_1073741889_1073 (size=14682) 2024-12-08T00:48:52,518 DEBUG [RS:0;0f983e3e5be1:33993 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/oldWALs 2024-12-08T00:48:52,518 INFO [RS:0;0f983e3e5be1:33993 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0f983e3e5be1%2C33993%2C1733618888281:(num 1733618926994) 2024-12-08T00:48:52,518 DEBUG [RS:0;0f983e3e5be1:33993 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:48:52,518 INFO [RS:0;0f983e3e5be1:33993 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:48:52,518 INFO [RS:0;0f983e3e5be1:33993 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T00:48:52,518 INFO [RS:0;0f983e3e5be1:33993 {}] hbase.ChoreService(370): Chore service for: regionserver/0f983e3e5be1:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-08T00:48:52,519 INFO [RS:0;0f983e3e5be1:33993 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T00:48:52,519 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
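The WAL-Shutdown-0 error above points at a configuration key by name, "hbase.wal.fshlog.wait.on.shutdown.seconds", as the way to extend the wait on the async writer close. A minimal sketch of setting it, assuming only the standard Hadoop/HBase Configuration API; the key name and its unit (seconds) are taken from the log message itself, and the value 30 is purely illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalShutdownWaitSketch {
  public static void main(String[] args) {
    // Load the usual HBase configuration (hbase-site.xml etc. on the classpath).
    Configuration conf = HBaseConfiguration.create();
    // Raise the async-writer close wait that the WAL-Shutdown-0 error refers to;
    // the log reports waiting 5 seconds, and 30 here is an arbitrary example value.
    conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
    // Hand 'conf' to whatever constructs the (mini)cluster or region server under test.
  }
}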
2024-12-08T00:48:52,519 INFO [RS:0;0f983e3e5be1:33993 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33993 2024-12-08T00:48:52,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33993-0x10002f2ac5a0001, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0f983e3e5be1,33993,1733618888281 2024-12-08T00:48:52,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T00:48:52,544 INFO [RS:0;0f983e3e5be1:33993 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T00:48:52,552 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0f983e3e5be1,33993,1733618888281] 2024-12-08T00:48:52,564 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0f983e3e5be1,33993,1733618888281 already deleted, retry=false 2024-12-08T00:48:52,564 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0f983e3e5be1,33993,1733618888281 expired; onlineServers=0 2024-12-08T00:48:52,564 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0f983e3e5be1,41445,1733618888143' ***** 2024-12-08T00:48:52,564 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T00:48:52,565 INFO [M:0;0f983e3e5be1:41445 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T00:48:52,565 INFO [M:0;0f983e3e5be1:41445 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T00:48:52,565 DEBUG [M:0;0f983e3e5be1:41445 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T00:48:52,565 DEBUG [M:0;0f983e3e5be1:41445 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T00:48:52,565 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-08T00:48:52,565 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733618888603 {}] cleaner.HFileCleaner(306): Exit Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733618888603,5,FailOnTimeoutGroup] 2024-12-08T00:48:52,565 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733618888597 {}] cleaner.HFileCleaner(306): Exit Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733618888597,5,FailOnTimeoutGroup] 2024-12-08T00:48:52,566 INFO [M:0;0f983e3e5be1:41445 {}] hbase.ChoreService(370): Chore service for: master/0f983e3e5be1:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T00:48:52,566 INFO [M:0;0f983e3e5be1:41445 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T00:48:52,566 DEBUG [M:0;0f983e3e5be1:41445 {}] master.HMaster(1795): Stopping service threads 2024-12-08T00:48:52,566 INFO [M:0;0f983e3e5be1:41445 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T00:48:52,567 INFO [M:0;0f983e3e5be1:41445 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T00:48:52,567 INFO [M:0;0f983e3e5be1:41445 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T00:48:52,567 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T00:48:52,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T00:48:52,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:52,577 DEBUG [M:0;0f983e3e5be1:41445 {}] zookeeper.ZKUtil(347): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T00:48:52,577 WARN [M:0;0f983e3e5be1:41445 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T00:48:52,577 INFO [M:0;0f983e3e5be1:41445 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/.lastflushedseqids 2024-12-08T00:48:52,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741897_1083 (size=130) 2024-12-08T00:48:52,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41113 is added to blk_1073741897_1083 (size=130) 2024-12-08T00:48:52,582 INFO [M:0;0f983e3e5be1:41445 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T00:48:52,583 INFO [M:0;0f983e3e5be1:41445 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T00:48:52,583 DEBUG [M:0;0f983e3e5be1:41445 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T00:48:52,583 INFO [M:0;0f983e3e5be1:41445 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:48:52,583 DEBUG [M:0;0f983e3e5be1:41445 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:48:52,583 DEBUG [M:0;0f983e3e5be1:41445 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T00:48:52,583 DEBUG [M:0;0f983e3e5be1:41445 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:48:52,583 INFO [M:0;0f983e3e5be1:41445 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.24 KB heapSize=29.47 KB 2024-12-08T00:48:52,596 DEBUG [M:0;0f983e3e5be1:41445 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c0f6a26f2aa741e984837ad0c8422dde is 82, key is hbase:meta,,1/info:regioninfo/1733618889490/Put/seqid=0 2024-12-08T00:48:52,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741898_1084 (size=5672) 2024-12-08T00:48:52,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41113 is added to blk_1073741898_1084 (size=5672) 2024-12-08T00:48:52,601 INFO [M:0;0f983e3e5be1:41445 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c0f6a26f2aa741e984837ad0c8422dde 2024-12-08T00:48:52,621 DEBUG [M:0;0f983e3e5be1:41445 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/61221912e259451db4f81555f0dfa5ec is 773, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733618890164/Put/seqid=0 2024-12-08T00:48:52,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41113 is added to blk_1073741899_1085 (size=6254) 2024-12-08T00:48:52,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741899_1085 (size=6254) 2024-12-08T00:48:52,627 INFO [M:0;0f983e3e5be1:41445 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/61221912e259451db4f81555f0dfa5ec 2024-12-08T00:48:52,631 INFO [M:0;0f983e3e5be1:41445 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 61221912e259451db4f81555f0dfa5ec 2024-12-08T00:48:52,644 DEBUG [M:0;0f983e3e5be1:41445 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/45bfb50a7616418995738f92f5eadfcb is 69, key is 0f983e3e5be1,33993,1733618888281/rs:state/1733618888650/Put/seqid=0 2024-12-08T00:48:52,649 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41113 is added to blk_1073741900_1086 (size=5224) 2024-12-08T00:48:52,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741900_1086 (size=5224) 2024-12-08T00:48:52,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33993-0x10002f2ac5a0001, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:48:52,652 INFO [RS:0;0f983e3e5be1:33993 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T00:48:52,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33993-0x10002f2ac5a0001, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:48:52,652 INFO [RS:0;0f983e3e5be1:33993 {}] regionserver.HRegionServer(1031): Exiting; stopping=0f983e3e5be1,33993,1733618888281; zookeeper connection closed. 2024-12-08T00:48:52,652 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@355be7ab {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@355be7ab 2024-12-08T00:48:52,653 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-12-08T00:48:52,887 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T00:48:52,907 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:48:52,907 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:48:52,908 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:48:52,908 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:48:52,908 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:48:52,908 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:48:52,911 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:48:52,913 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:48:53,050 INFO [M:0;0f983e3e5be1:41445 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/45bfb50a7616418995738f92f5eadfcb 2024-12-08T00:48:53,074 DEBUG [M:0;0f983e3e5be1:41445 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/37105aea67074e248501da6985743b60 is 52, key is load_balancer_on/state:d/1733618889633/Put/seqid=0 2024-12-08T00:48:53,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741901_1087 (size=5056) 2024-12-08T00:48:53,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41113 is added to blk_1073741901_1087 (size=5056) 2024-12-08T00:48:53,078 INFO [M:0;0f983e3e5be1:41445 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/37105aea67074e248501da6985743b60 2024-12-08T00:48:53,084 DEBUG [M:0;0f983e3e5be1:41445 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c0f6a26f2aa741e984837ad0c8422dde as hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c0f6a26f2aa741e984837ad0c8422dde 2024-12-08T00:48:53,089 INFO [M:0;0f983e3e5be1:41445 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c0f6a26f2aa741e984837ad0c8422dde, entries=8, sequenceid=60, filesize=5.5 K 2024-12-08T00:48:53,090 DEBUG [M:0;0f983e3e5be1:41445 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/61221912e259451db4f81555f0dfa5ec as hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/61221912e259451db4f81555f0dfa5ec 2024-12-08T00:48:53,094 INFO [M:0;0f983e3e5be1:41445 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 61221912e259451db4f81555f0dfa5ec 2024-12-08T00:48:53,094 INFO [M:0;0f983e3e5be1:41445 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/61221912e259451db4f81555f0dfa5ec, entries=6, sequenceid=60, filesize=6.1 K 2024-12-08T00:48:53,095 DEBUG [M:0;0f983e3e5be1:41445 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/45bfb50a7616418995738f92f5eadfcb as hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/45bfb50a7616418995738f92f5eadfcb 2024-12-08T00:48:53,100 INFO [M:0;0f983e3e5be1:41445 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/45bfb50a7616418995738f92f5eadfcb, entries=2, sequenceid=60, filesize=5.1 K 2024-12-08T00:48:53,101 DEBUG [M:0;0f983e3e5be1:41445 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/37105aea67074e248501da6985743b60 as hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/37105aea67074e248501da6985743b60 2024-12-08T00:48:53,105 INFO [M:0;0f983e3e5be1:41445 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/37105aea67074e248501da6985743b60, entries=1, sequenceid=60, filesize=4.9 K 2024-12-08T00:48:53,106 INFO [M:0;0f983e3e5be1:41445 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 523ms, sequenceid=60, compaction requested=false 2024-12-08T00:48:53,108 INFO [M:0;0f983e3e5be1:41445 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:48:53,108 DEBUG [M:0;0f983e3e5be1:41445 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733618932583Disabling compacts and flushes for region at 1733618932583Disabling writes for close at 1733618932583Obtaining lock to block concurrent updates at 1733618932583Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733618932583Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23793, getHeapSize=30112, getOffHeapSize=0, getCellsCount=71 at 1733618932583Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733618932584 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733618932584Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733618932596 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733618932596Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733618932606 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733618932620 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733618932620Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733618932632 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733618932644 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733618932644Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733618933059 (+415 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733618933073 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733618933073Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3d613c1f: reopening flushed file at 1733618933083 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1c03aa26: reopening flushed file at 1733618933089 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@38250b11: reopening flushed file at 1733618933094 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@d1adc1a: reopening flushed file at 1733618933100 (+6 ms)Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 523ms, sequenceid=60, compaction requested=false at 1733618933106 (+6 ms)Writing region close event to WAL at 1733618933107 (+1 ms)Closed at 1733618933107 2024-12-08T00:48:53,108 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:53,108 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:53,108 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:53,108 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:53,108 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:48:53,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41113 is added to blk_1073741888_1071 (size=1045) 2024-12-08T00:48:53,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741888_1071 (size=1045) 2024-12-08T00:48:53,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:48:53,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:48:53,849 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@c5cec96 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-791727382-172.17.0.2-1733618886417:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:45779,null,null]) java.net.ConnectException: Call From 0f983e3e5be1/172.17.0.2 to localhost:42159 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-08T00:48:54,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:48:54,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:48:54,631 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/WALs/0f983e3e5be1,41445,1733618888143/0f983e3e5be1%2C41445%2C1733618888143.1733618888401 to hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/oldWALs/0f983e3e5be1%2C41445%2C1733618888143.1733618888401 2024-12-08T00:48:54,639 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/MasterData/oldWALs/0f983e3e5be1%2C41445%2C1733618888143.1733618888401 to hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/oldWALs/0f983e3e5be1%2C41445%2C1733618888143.1733618888401$masterlocalwal$ 2024-12-08T00:48:54,639 INFO [M:0;0f983e3e5be1:41445 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-08T00:48:54,639 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T00:48:54,640 INFO [M:0;0f983e3e5be1:41445 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41445 2024-12-08T00:48:54,640 INFO [M:0;0f983e3e5be1:41445 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T00:48:54,785 INFO [M:0;0f983e3e5be1:41445 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T00:48:54,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:48:54,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41445-0x10002f2ac5a0000, quorum=127.0.0.1:53241, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:48:54,791 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@358cd92{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:48:54,792 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@207a6434{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:48:54,793 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:48:54,793 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f38666d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:48:54,793 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13a29b23{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/hadoop.log.dir/,STOPPED} 2024-12-08T00:48:54,795 WARN [BP-791727382-172.17.0.2-1733618886417 heartbeating to localhost/127.0.0.1:34853 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:48:54,795 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-08T00:48:54,795 WARN [BP-791727382-172.17.0.2-1733618886417 heartbeating to localhost/127.0.0.1:34853 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-791727382-172.17.0.2-1733618886417 (Datanode Uuid 94032d42-a92f-496f-a54d-179c92434f44) service to localhost/127.0.0.1:34853 2024-12-08T00:48:54,795 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:48:54,795 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@1bcd67ae {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-791727382-172.17.0.2-1733618886417:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:45779,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:42159 , LocalHost:localPort 0f983e3e5be1/172.17.0.2:0. 
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-08T00:48:54,796 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@1bcd67ae {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-791727382-172.17.0.2-1733618886417:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:41113,null,null]) java.io.IOException: No block pool offer service for bpid=BP-791727382-172.17.0.2-1733618886417 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:54,796 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data3/current/BP-791727382-172.17.0.2-1733618886417 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:48:54,796 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@1bcd67ae {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-791727382-172.17.0.2-1733618886417:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:45779,null,null], DatanodeInfoWithStorage[127.0.0.1:41113,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-791727382-172.17.0.2-1733618886417:blk_1073741837_1013, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:45779,null,null], DatanodeInfoWithStorage[127.0.0.1:41113,null,null]] 2024-12-08T00:48:54,796 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data4/current/BP-791727382-172.17.0.2-1733618886417 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:48:54,796 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@1bcd67ae {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-791727382-172.17.0.2-1733618886417:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:41113,null,null]) java.io.IOException: No block pool offer service for bpid=BP-791727382-172.17.0.2-1733618886417 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:54,796 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@1bcd67ae {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-791727382-172.17.0.2-1733618886417:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:45779,null,null]) java.io.IOException: No block pool offer service for bpid=BP-791727382-172.17.0.2-1733618886417 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:48:54,796 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:48:54,796 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@1bcd67ae {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-791727382-172.17.0.2-1733618886417:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:41113,null,null], DatanodeInfoWithStorage[127.0.0.1:45779,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-791727382-172.17.0.2-1733618886417:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:41113,null,null], DatanodeInfoWithStorage[127.0.0.1:45779,null,null]] 2024-12-08T00:48:54,799 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b3c44a7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:48:54,800 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6682cb77{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:48:54,800 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:48:54,800 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3aae8f75{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:48:54,800 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@66f331b6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/hadoop.log.dir/,STOPPED} 2024-12-08T00:48:54,801 ERROR 
[Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-08T00:48:54,801 WARN [BP-791727382-172.17.0.2-1733618886417 heartbeating to localhost/127.0.0.1:34853 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:48:54,801 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:48:54,801 WARN [BP-791727382-172.17.0.2-1733618886417 heartbeating to localhost/127.0.0.1:34853 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-791727382-172.17.0.2-1733618886417 (Datanode Uuid cab2c1f4-0b38-495b-81c5-dd7bc55561eb) service to localhost/127.0.0.1:34853 2024-12-08T00:48:54,801 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data9/current/BP-791727382-172.17.0.2-1733618886417 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:48:54,802 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/cluster_e74a2886-159c-d840-b9ca-6af3ceb02e7c/data/data10/current/BP-791727382-172.17.0.2-1733618886417 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:48:54,802 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:48:54,806 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2aec9805{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T00:48:54,807 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7cd8d56a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:48:54,807 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:48:54,807 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10e6067e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:48:54,807 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@151bb937{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/hadoop.log.dir/,STOPPED} 2024-12-08T00:48:54,814 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T00:48:54,839 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T00:48:54,845 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=157 (was 82) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f5808bf1628.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34853 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:34853 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41579 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
LeaseRenewer:jenkins.hfs.2@localhost:34853 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34853 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:41579 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34853 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:34853 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: 
Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f5808bf1628.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:34853 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34853 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34853 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:34853 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:34853 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-18-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=26 (was 49), ProcessCount=11 (was 11), AvailableMemoryMB=17476 (was 17904) 2024-12-08T00:48:54,851 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=157, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=26, ProcessCount=11, AvailableMemoryMB=17477 2024-12-08T00:48:54,851 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T00:48:54,852 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/hadoop.log.dir so I do NOT create it in target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788 2024-12-08T00:48:54,852 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b75e2a83-c7d9-b2eb-d07f-4642155fe2a3/hadoop.tmp.dir so I do NOT create it in target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788 2024-12-08T00:48:54,852 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/cluster_c1909289-147f-3345-59fd-781f6482af86, deleteOnExit=true 2024-12-08T00:48:54,852 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-08T00:48:54,852 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/test.cache.data in system properties and HBase conf 2024-12-08T00:48:54,852 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T00:48:54,852 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/hadoop.log.dir in system properties and HBase conf 2024-12-08T00:48:54,852 INFO [Time-limited test 
{}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T00:48:54,852 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T00:48:54,852 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T00:48:54,852 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-08T00:48:54,852 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T00:48:54,852 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T00:48:54,853 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T00:48:54,853 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T00:48:54,853 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T00:48:54,853 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T00:48:54,853 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T00:48:54,853 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T00:48:54,853 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T00:48:54,853 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/nfs.dump.dir in system properties and HBase conf 2024-12-08T00:48:54,853 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/java.io.tmpdir in system properties and HBase conf 2024-12-08T00:48:54,853 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T00:48:54,853 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T00:48:54,853 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T00:48:54,864 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T00:48:55,123 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:48:55,127 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:48:55,128 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:48:55,128 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:48:55,128 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T00:48:55,128 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:48:55,129 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2aa6e3f6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:48:55,129 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@38bb274c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:48:55,218 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@36ee8469{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/java.io.tmpdir/jetty-localhost-38263-hadoop-hdfs-3_4_1-tests_jar-_-any-4492973252508450453/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T00:48:55,218 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@41098146{HTTP/1.1, (http/1.1)}{localhost:38263} 2024-12-08T00:48:55,218 INFO [Time-limited test {}] server.Server(415): Started @154648ms 2024-12-08T00:48:55,228 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T00:48:55,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:48:55,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:48:55,429 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:48:55,432 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:48:55,433 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:48:55,433 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:48:55,433 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T00:48:55,433 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7d5b7744{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:48:55,434 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@63477bff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:48:55,525 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@108cc47d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/java.io.tmpdir/jetty-localhost-43245-hadoop-hdfs-3_4_1-tests_jar-_-any-7147764259936597363/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:48:55,525 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@72f811fc{HTTP/1.1, (http/1.1)}{localhost:43245} 2024-12-08T00:48:55,526 INFO [Time-limited test {}] server.Server(415): Started @154956ms 2024-12-08T00:48:55,527 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T00:48:55,553 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:48:55,556 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:48:55,557 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:48:55,557 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:48:55,557 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T00:48:55,557 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@124b3c60{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:48:55,558 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3df67424{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:48:55,646 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7098476e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/java.io.tmpdir/jetty-localhost-32971-hadoop-hdfs-3_4_1-tests_jar-_-any-3882043451807489634/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:48:55,647 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2c66348c{HTTP/1.1, (http/1.1)}{localhost:32971} 2024-12-08T00:48:55,647 INFO [Time-limited test {}] server.Server(415): Started @155077ms 2024-12-08T00:48:55,648 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
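The recurring Close-WAL-Writer-0 warnings in this section come from RecoverLeaseFSUtils reflectively probing DFSClient.isFileClosed after the test has already shut the filesystem down, so every probe fails with "Filesystem closed". As orientation only, a minimal non-reflective sketch of that lease-recovery loop against the public DistributedFileSystem API might look like the following; the back-off interval and the bare exception handling are assumptions, not taken from this run.

    // Illustrative sketch of the probe loop behind the Close-WAL-Writer-0 warnings above.
    // Direct calls instead of reflection; the 1s back-off is an assumption.
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
      static void recoverWalLease(DistributedFileSystem dfs, Path wal) throws Exception {
        boolean closed = dfs.recoverLease(wal);   // true once the NameNode has closed the file
        while (!closed) {
          Thread.sleep(1000L);
          // Throws IOException("Filesystem closed") if the DFSClient was already shut down,
          // which is exactly the failure the warnings above record.
          closed = dfs.isFileClosed(wal);
        }
      }
    }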
2024-12-08T00:48:55,911 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-08T00:48:55,912 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T00:48:55,912 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-08T00:48:55,912 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-08T00:48:56,231 WARN [Thread-1194 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/cluster_c1909289-147f-3345-59fd-781f6482af86/data/data1/current/BP-2043044047-172.17.0.2-1733618934874/current, will proceed with Du for space computation calculation, 2024-12-08T00:48:56,231 WARN [Thread-1195 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/cluster_c1909289-147f-3345-59fd-781f6482af86/data/data2/current/BP-2043044047-172.17.0.2-1733618934874/current, will proceed with Du for space computation calculation, 2024-12-08T00:48:56,256 WARN [Thread-1158 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T00:48:56,258 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa3472f78e7b77a1 with lease ID 0x1d416edf38cf916f: Processing first storage report for DS-3e53e221-56b1-47c9-a3fe-5e8efbec7405 from datanode DatanodeRegistration(127.0.0.1:33349, datanodeUuid=79655df2-a483-4591-9bcf-bcb8d8bbb0fa, infoPort=39401, infoSecurePort=0, ipcPort=37025, storageInfo=lv=-57;cid=testClusterID;nsid=28432284;c=1733618934874) 2024-12-08T00:48:56,258 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa3472f78e7b77a1 with lease ID 0x1d416edf38cf916f: from storage DS-3e53e221-56b1-47c9-a3fe-5e8efbec7405 node DatanodeRegistration(127.0.0.1:33349, datanodeUuid=79655df2-a483-4591-9bcf-bcb8d8bbb0fa, infoPort=39401, infoSecurePort=0, ipcPort=37025, storageInfo=lv=-57;cid=testClusterID;nsid=28432284;c=1733618934874), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:48:56,258 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa3472f78e7b77a1 with lease ID 0x1d416edf38cf916f: Processing first storage report for DS-94ffcfad-c88a-4f09-8130-3e8660bb6f6a from datanode DatanodeRegistration(127.0.0.1:33349, datanodeUuid=79655df2-a483-4591-9bcf-bcb8d8bbb0fa, infoPort=39401, infoSecurePort=0, ipcPort=37025, storageInfo=lv=-57;cid=testClusterID;nsid=28432284;c=1733618934874) 2024-12-08T00:48:56,258 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa3472f78e7b77a1 with lease ID 0x1d416edf38cf916f: from storage DS-94ffcfad-c88a-4f09-8130-3e8660bb6f6a node DatanodeRegistration(127.0.0.1:33349, datanodeUuid=79655df2-a483-4591-9bcf-bcb8d8bbb0fa, infoPort=39401, infoSecurePort=0, ipcPort=37025, storageInfo=lv=-57;cid=testClusterID;nsid=28432284;c=1733618934874), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:48:56,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:48:56,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:48:56,373 WARN [Thread-1205 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/cluster_c1909289-147f-3345-59fd-781f6482af86/data/data3/current/BP-2043044047-172.17.0.2-1733618934874/current, will proceed with Du for space computation calculation, 2024-12-08T00:48:56,374 WARN [Thread-1206 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/cluster_c1909289-147f-3345-59fd-781f6482af86/data/data4/current/BP-2043044047-172.17.0.2-1733618934874/current, will proceed with Du for space computation calculation, 2024-12-08T00:48:56,391 WARN [Thread-1181 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T00:48:56,394 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x26cc6055cea63a67 with lease ID 0x1d416edf38cf9170: Processing first storage report for DS-58b1bf7e-5c6e-4739-b326-ae88a0799027 from datanode DatanodeRegistration(127.0.0.1:43949, datanodeUuid=5584fa3b-e14c-4e79-ba2e-d4b3219a978d, infoPort=33267, infoSecurePort=0, ipcPort=41803, storageInfo=lv=-57;cid=testClusterID;nsid=28432284;c=1733618934874) 2024-12-08T00:48:56,394 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x26cc6055cea63a67 with lease ID 0x1d416edf38cf9170: from storage DS-58b1bf7e-5c6e-4739-b326-ae88a0799027 node DatanodeRegistration(127.0.0.1:43949, datanodeUuid=5584fa3b-e14c-4e79-ba2e-d4b3219a978d, infoPort=33267, infoSecurePort=0, ipcPort=41803, storageInfo=lv=-57;cid=testClusterID;nsid=28432284;c=1733618934874), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:48:56,394 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x26cc6055cea63a67 with lease ID 0x1d416edf38cf9170: Processing first storage report for DS-54b21332-71bc-4a57-89dc-03ebbff72250 from datanode DatanodeRegistration(127.0.0.1:43949, datanodeUuid=5584fa3b-e14c-4e79-ba2e-d4b3219a978d, infoPort=33267, infoSecurePort=0, ipcPort=41803, storageInfo=lv=-57;cid=testClusterID;nsid=28432284;c=1733618934874) 2024-12-08T00:48:56,394 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x26cc6055cea63a67 with lease ID 0x1d416edf38cf9170: from storage DS-54b21332-71bc-4a57-89dc-03ebbff72250 node DatanodeRegistration(127.0.0.1:43949, datanodeUuid=5584fa3b-e14c-4e79-ba2e-d4b3219a978d, infoPort=33267, infoSecurePort=0, ipcPort=41803, storageInfo=lv=-57;cid=testClusterID;nsid=28432284;c=1733618934874), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:48:56,477 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788 2024-12-08T00:48:56,481 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/cluster_c1909289-147f-3345-59fd-781f6482af86/zookeeper_0, clientPort=55980, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/cluster_c1909289-147f-3345-59fd-781f6482af86/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/cluster_c1909289-147f-3345-59fd-781f6482af86/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T00:48:56,482 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55980 2024-12-08T00:48:56,482 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:48:56,484 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:48:56,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43949 is added to blk_1073741825_1001 (size=7) 2024-12-08T00:48:56,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33349 is added to blk_1073741825_1001 (size=7) 2024-12-08T00:48:56,493 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a with version=8 2024-12-08T00:48:56,493 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/hbase-staging 2024-12-08T00:48:56,496 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0f983e3e5be1:0 server-side Connection retries=45 2024-12-08T00:48:56,496 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:48:56,496 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T00:48:56,496 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T00:48:56,496 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:48:56,496 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T00:48:56,496 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T00:48:56,496 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T00:48:56,497 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40363 2024-12-08T00:48:56,498 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40363 connecting to ZooKeeper ensemble=127.0.0.1:55980 2024-12-08T00:48:56,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:403630x0, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T00:48:56,547 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40363-0x10002f3693d0000 connected 2024-12-08T00:48:56,669 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:48:56,672 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:48:56,676 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:48:56,676 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a, hbase.cluster.distributed=false 2024-12-08T00:48:56,680 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T00:48:56,681 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40363 2024-12-08T00:48:56,681 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40363 2024-12-08T00:48:56,681 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40363 2024-12-08T00:48:56,682 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40363 2024-12-08T00:48:56,682 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40363 2024-12-08T00:48:56,695 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0f983e3e5be1:0 server-side Connection retries=45 2024-12-08T00:48:56,695 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:48:56,695 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T00:48:56,695 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T00:48:56,695 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:48:56,695 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T00:48:56,695 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T00:48:56,695 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T00:48:56,696 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46417 2024-12-08T00:48:56,697 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46417 connecting to ZooKeeper ensemble=127.0.0.1:55980 2024-12-08T00:48:56,698 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:48:56,699 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:48:56,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:464170x0, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T00:48:56,709 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46417-0x10002f3693d0001 connected 2024-12-08T00:48:56,710 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46417-0x10002f3693d0001, quorum=127.0.0.1:55980, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:48:56,710 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T00:48:56,710 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T00:48:56,711 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46417-0x10002f3693d0001, quorum=127.0.0.1:55980, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T00:48:56,713 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46417-0x10002f3693d0001, quorum=127.0.0.1:55980, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T00:48:56,713 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46417 2024-12-08T00:48:56,713 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46417 2024-12-08T00:48:56,714 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46417 2024-12-08T00:48:56,714 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46417 2024-12-08T00:48:56,714 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46417 
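The RecoverableZooKeeper, ZKWatcher and ZKUtil lines above show the master and region server connecting to the test quorum at 127.0.0.1:55980 and setting watches on znodes such as /hbase/running and /hbase/master before those nodes exist. A bare-bones equivalent of that step with the plain ZooKeeper client is sketched below; the session timeout and the print-only watcher are illustrative choices, not HBase's ZKWatcher implementation.

    // Minimal sketch: register a watch on /hbase/master on the test quorum even though
    // the znode does not exist yet. Timeout and event handling are assumptions.
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public final class MasterZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        Watcher printer = (WatchedEvent e) ->
            System.out.println("ZK event: " + e.getType() + " on " + e.getPath());
        ZooKeeper zk = new ZooKeeper("127.0.0.1:55980", 30_000, printer);
        // exists() registers the watch whether or not the znode is present, which is what
        // the "Set watcher on znode that does not yet exist" lines above describe.
        zk.exists("/hbase/master", printer);
        Thread.sleep(5_000);   // toy wait for a possible NodeCreated event
        zk.close();
      }
    }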
2024-12-08T00:48:56,727 DEBUG [M:0;0f983e3e5be1:40363 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0f983e3e5be1:40363 2024-12-08T00:48:56,727 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0f983e3e5be1,40363,1733618936495 2024-12-08T00:48:56,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46417-0x10002f3693d0001, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:48:56,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:48:56,739 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0f983e3e5be1,40363,1733618936495 2024-12-08T00:48:56,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46417-0x10002f3693d0001, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T00:48:56,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:56,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46417-0x10002f3693d0001, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:56,747 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T00:48:56,748 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0f983e3e5be1,40363,1733618936495 from backup master directory 2024-12-08T00:48:56,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0f983e3e5be1,40363,1733618936495 2024-12-08T00:48:56,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:48:56,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46417-0x10002f3693d0001, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:48:56,759 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
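The lines above show the master first adding an ephemeral entry under /hbase/backup-masters and then, once it claims /hbase/master, deleting that backup entry again. The registration itself boils down to an ephemeral create; the sketch below simplifies the znode payload to a plain string and omits HBase's retry and protobuf handling, so treat it as a conceptual illustration rather than ActiveMasterManager's code.

    // Conceptual sketch of registering under /hbase/backup-masters as an ephemeral znode.
    // Server name copied from the log; payload format, ACLs and retries are simplified.
    import java.nio.charset.StandardCharsets;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public final class BackupMasterRegistrationSketch {
      static String register(ZooKeeper zk) throws Exception {
        String serverName = "0f983e3e5be1,40363,1733618936495";
        // Ephemeral: the entry vanishes automatically if this master's session dies.
        return zk.create("/hbase/backup-masters/" + serverName,
            serverName.getBytes(StandardCharsets.UTF_8),
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      }
    }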
2024-12-08T00:48:56,759 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0f983e3e5be1,40363,1733618936495 2024-12-08T00:48:56,768 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/hbase.id] with ID: 2a8ba3d6-ecf1-4da4-98d6-5488508cf31b 2024-12-08T00:48:56,768 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/.tmp/hbase.id 2024-12-08T00:48:56,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33349 is added to blk_1073741826_1002 (size=42) 2024-12-08T00:48:56,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43949 is added to blk_1073741826_1002 (size=42) 2024-12-08T00:48:56,777 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/.tmp/hbase.id]:[hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/hbase.id] 2024-12-08T00:48:56,789 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:48:56,789 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-08T00:48:56,790 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
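The FSUtils lines above write the new cluster ID to .tmp/hbase.id first and only then move it to its final hbase.id location, so readers never observe a half-written file. A compact sketch of that write-then-rename pattern with the plain Hadoop FileSystem API follows; the rootdir mirrors the log, while the method shape and error handling are assumptions.

    // Write-then-rename sketch for publishing a small metadata file on HDFS.
    // Rootdir and file names echo the log above; everything else is illustrative.
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class ClusterIdFileSketch {
      static void writeClusterId(Configuration conf, String clusterId) throws IOException {
        Path root = new Path("hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a");
        FileSystem fs = root.getFileSystem(conf);
        Path tmp = new Path(root, ".tmp/hbase.id");
        Path dst = new Path(root, "hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {   // overwrite any stale temp file
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        if (!fs.rename(tmp, dst)) {                             // the rename is the publish step
          throw new IOException("Could not move " + tmp + " to " + dst);
        }
      }
    }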
2024-12-08T00:48:56,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:56,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46417-0x10002f3693d0001, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:56,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43949 is added to blk_1073741827_1003 (size=196) 2024-12-08T00:48:56,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33349 is added to blk_1073741827_1003 (size=196) 2024-12-08T00:48:56,810 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T00:48:56,811 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T00:48:56,811 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:48:56,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43949 is added to blk_1073741828_1004 (size=1189) 2024-12-08T00:48:56,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33349 is added to blk_1073741828_1004 (size=1189) 2024-12-08T00:48:56,820 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/data/master/store 2024-12-08T00:48:56,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43949 is added to blk_1073741829_1005 (size=34) 2024-12-08T00:48:56,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33349 is added to blk_1073741829_1005 (size=34) 2024-12-08T00:48:56,826 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:48:56,826 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T00:48:56,826 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:48:56,827 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:48:56,827 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T00:48:56,827 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:48:56,827 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
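The region.MasterRegion lines above spell out the local 'master:store' table in full: four column families (info, proc, rs, state) with per-family versions, block sizes, encodings and bloom filters. As a rough illustration of how such a layout maps onto the public descriptor builders, the sketch below reproduces the 'info' and 'proc' families only; the table name construction is a simplification and this is not the MasterRegion bootstrap code itself.

    // Hedged sketch: the 'info' and 'proc' families from the descriptor logged above,
    // expressed with the public HBase client builders. Values copied from the log.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MasterStoreDescriptorSketch {
      static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)
                .setBloomFilterType(BloomType.ROW)
                .build())
            .build();
      }
    }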
2024-12-08T00:48:56,827 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733618936826Disabling compacts and flushes for region at 1733618936826Disabling writes for close at 1733618936827 (+1 ms)Writing region close event to WAL at 1733618936827Closed at 1733618936827 2024-12-08T00:48:56,828 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/data/master/store/.initializing 2024-12-08T00:48:56,828 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/WALs/0f983e3e5be1,40363,1733618936495 2024-12-08T00:48:56,830 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C40363%2C1733618936495, suffix=, logDir=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/WALs/0f983e3e5be1,40363,1733618936495, archiveDir=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/oldWALs, maxLogs=10 2024-12-08T00:48:56,831 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C40363%2C1733618936495.1733618936831 2024-12-08T00:48:56,836 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/WALs/0f983e3e5be1,40363,1733618936495/0f983e3e5be1%2C40363%2C1733618936495.1733618936831 2024-12-08T00:48:56,839 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39401:39401),(127.0.0.1/127.0.0.1:33267:33267)] 2024-12-08T00:48:56,841 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:48:56,841 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:48:56,841 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:56,841 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:56,843 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:56,844 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T00:48:56,844 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:56,844 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:48:56,845 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:56,846 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T00:48:56,846 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:56,846 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:48:56,846 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:56,847 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T00:48:56,847 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:56,848 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:48:56,848 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:56,849 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T00:48:56,849 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:56,849 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:48:56,850 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:56,850 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:56,850 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:56,852 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:56,852 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:56,852 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T00:48:56,853 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:48:56,855 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:48:56,855 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=773947, jitterRate=-0.015876412391662598}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T00:48:56,856 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733618936841Initializing all the Stores at 1733618936842 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618936842Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618936843 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618936843Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618936843Cleaning up temporary data from old regions at 1733618936852 (+9 ms)Region opened successfully at 1733618936856 (+4 ms) 2024-12-08T00:48:56,856 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T00:48:56,859 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34d93077, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0f983e3e5be1/172.17.0.2:0 2024-12-08T00:48:56,860 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T00:48:56,860 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T00:48:56,860 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T00:48:56,860 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T00:48:56,860 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-08T00:48:56,861 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-08T00:48:56,861 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T00:48:56,862 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T00:48:56,863 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T00:48:56,880 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T00:48:56,880 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T00:48:56,881 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T00:48:56,888 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T00:48:56,888 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T00:48:56,889 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T00:48:56,896 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T00:48:56,898 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T00:48:56,905 DEBUG 
[master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T00:48:56,907 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T00:48:56,917 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T00:48:56,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T00:48:56,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46417-0x10002f3693d0001, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T00:48:56,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46417-0x10002f3693d0001, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:56,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:56,926 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0f983e3e5be1,40363,1733618936495, sessionid=0x10002f3693d0000, setting cluster-up flag (Was=false) 2024-12-08T00:48:56,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46417-0x10002f3693d0001, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:56,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:56,967 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T00:48:56,969 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0f983e3e5be1,40363,1733618936495 2024-12-08T00:48:56,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:56,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46417-0x10002f3693d0001, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:57,013 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T00:48:57,014 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0f983e3e5be1,40363,1733618936495 2024-12-08T00:48:57,016 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T00:48:57,017 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T00:48:57,018 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T00:48:57,018 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-08T00:48:57,018 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0f983e3e5be1,40363,1733618936495 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T00:48:57,019 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:48:57,019 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:48:57,019 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:48:57,019 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:48:57,019 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0f983e3e5be1:0, corePoolSize=10, maxPoolSize=10 2024-12-08T00:48:57,019 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:57,019 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=2, maxPoolSize=2 2024-12-08T00:48:57,019 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=1, 
maxPoolSize=1 2024-12-08T00:48:57,020 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733618967020 2024-12-08T00:48:57,020 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T00:48:57,021 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T00:48:57,021 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T00:48:57,021 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T00:48:57,021 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T00:48:57,021 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T00:48:57,021 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:57,021 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T00:48:57,021 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T00:48:57,021 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T00:48:57,021 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:48:57,022 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T00:48:57,022 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-08T00:48:57,022 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T00:48:57,022 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733618937022,5,FailOnTimeoutGroup] 2024-12-08T00:48:57,022 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733618937022,5,FailOnTimeoutGroup] 2024-12-08T00:48:57,022 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:57,022 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T00:48:57,022 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:57,022 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:57,023 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:57,023 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T00:48:57,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43949 is added to blk_1073741831_1007 (size=1321) 2024-12-08T00:48:57,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33349 is added to blk_1073741831_1007 (size=1321) 2024-12-08T00:48:57,031 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T00:48:57,031 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a 2024-12-08T00:48:57,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43949 is added to blk_1073741832_1008 (size=32) 2024-12-08T00:48:57,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33349 is added to blk_1073741832_1008 (size=32) 2024-12-08T00:48:57,041 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:48:57,042 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T00:48:57,044 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T00:48:57,044 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:57,044 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:48:57,045 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T00:48:57,046 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T00:48:57,046 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:57,046 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:48:57,047 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T00:48:57,048 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T00:48:57,048 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:57,048 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:48:57,048 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T00:48:57,049 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T00:48:57,049 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:57,050 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:48:57,050 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T00:48:57,050 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/hbase/meta/1588230740 2024-12-08T00:48:57,051 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/hbase/meta/1588230740 2024-12-08T00:48:57,052 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T00:48:57,052 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T00:48:57,052 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
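The FlushLargeStoresPolicy lines above show the fallback rule applied when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset: the per-family flush lower bound is the region's memstore flush size divided by its number of column families. The numbers in this log line up with that rule, as the small sketch below re-derives; the 64 MB figure used for hbase:meta is an inference from the flushSizeLowerBound=16777216 value reported further down and its four families, not a value printed in the log.

    // Minimal sketch (not HBase code): re-derives the per-family flush lower bounds
    // reported by FlushLargeStoresPolicy when no explicit lower bound is configured.
    public final class FlushLowerBoundSketch {
      static long perFamilyLowerBound(long memstoreFlushSize, int columnFamilies) {
        return memstoreFlushSize / columnFamilies;
      }

      public static void main(String[] args) {
        // master:store region: flushSize=134217728 (128 MB) per the MasterRegionFlusherAndCompactor
        // line above, four families (info, proc, rs, state).
        System.out.println(perFamilyLowerBound(134_217_728L, 4)); // 33554432 = 32 MB, matches the log
        // hbase:meta region: four families (info, ns, rep_barrier, table); a 64 MB flush size
        // (assumed, see lead-in) yields the 16777216 lower bound reported below.
        System.out.println(perFamilyLowerBound(67_108_864L, 4)); // 16777216 = 16 MB
      }
    }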
2024-12-08T00:48:57,053 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T00:48:57,055 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:48:57,055 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=745985, jitterRate=-0.051431357860565186}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T00:48:57,056 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733618937041Initializing all the Stores at 1733618937042 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618937042Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618937042Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618937042Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618937042Cleaning up temporary data from old regions at 1733618937052 (+10 ms)Region opened successfully at 1733618937056 (+4 ms) 2024-12-08T00:48:57,056 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T00:48:57,056 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T00:48:57,056 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T00:48:57,056 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T00:48:57,056 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T00:48:57,056 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T00:48:57,056 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733618937056Disabling compacts and flushes for region at 1733618937056Disabling writes for close at 1733618937056Writing region 
close event to WAL at 1733618937056Closed at 1733618937056 2024-12-08T00:48:57,058 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:48:57,058 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T00:48:57,058 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T00:48:57,059 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T00:48:57,060 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T00:48:57,116 INFO [RS:0;0f983e3e5be1:46417 {}] regionserver.HRegionServer(746): ClusterId : 2a8ba3d6-ecf1-4da4-98d6-5488508cf31b 2024-12-08T00:48:57,116 DEBUG [RS:0;0f983e3e5be1:46417 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T00:48:57,135 DEBUG [RS:0;0f983e3e5be1:46417 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T00:48:57,135 DEBUG [RS:0;0f983e3e5be1:46417 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T00:48:57,144 DEBUG [RS:0;0f983e3e5be1:46417 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T00:48:57,144 DEBUG [RS:0;0f983e3e5be1:46417 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63bcb61, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0f983e3e5be1/172.17.0.2:0 2024-12-08T00:48:57,157 DEBUG [RS:0;0f983e3e5be1:46417 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0f983e3e5be1:46417 2024-12-08T00:48:57,157 INFO [RS:0;0f983e3e5be1:46417 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T00:48:57,157 INFO [RS:0;0f983e3e5be1:46417 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T00:48:57,157 DEBUG [RS:0;0f983e3e5be1:46417 {}] regionserver.HRegionServer(832): About to register with Master. 
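Once the registration handshake that starts below completes (reportForDuty on the region server side, "Registering regionserver=" on the master side), the same membership can be read back through the client API. A minimal sketch, assuming the HBase 2.x-style ClusterMetrics methods are still present in the 4.0.0-alpha-1-SNAPSHOT client exercised by this test and using the ZooKeeper quorum 127.0.0.1:55980 shown in the ZKWatcher lines; it is an illustration, not part of the test itself.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class ClusterMembershipCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Quorum and client port as they appear in the ZKWatcher lines in this log.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "55980");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          System.out.println("active master: " + metrics.getMasterName());
          // One entry per successfully registered region server.
          metrics.getLiveServerMetrics().keySet()
                 .forEach(sn -> System.out.println("live regionserver: " + sn));
        }
      }
    }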
2024-12-08T00:48:57,158 INFO [RS:0;0f983e3e5be1:46417 {}] regionserver.HRegionServer(2659): reportForDuty to master=0f983e3e5be1,40363,1733618936495 with port=46417, startcode=1733618936694 2024-12-08T00:48:57,158 DEBUG [RS:0;0f983e3e5be1:46417 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T00:48:57,160 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59353, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T00:48:57,160 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40363 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0f983e3e5be1,46417,1733618936694 2024-12-08T00:48:57,160 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40363 {}] master.ServerManager(517): Registering regionserver=0f983e3e5be1,46417,1733618936694 2024-12-08T00:48:57,162 DEBUG [RS:0;0f983e3e5be1:46417 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a 2024-12-08T00:48:57,162 DEBUG [RS:0;0f983e3e5be1:46417 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37255 2024-12-08T00:48:57,162 DEBUG [RS:0;0f983e3e5be1:46417 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T00:48:57,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T00:48:57,172 DEBUG [RS:0;0f983e3e5be1:46417 {}] zookeeper.ZKUtil(111): regionserver:46417-0x10002f3693d0001, quorum=127.0.0.1:55980, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0f983e3e5be1,46417,1733618936694 2024-12-08T00:48:57,172 WARN [RS:0;0f983e3e5be1:46417 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T00:48:57,172 INFO [RS:0;0f983e3e5be1:46417 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:48:57,172 DEBUG [RS:0;0f983e3e5be1:46417 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694 2024-12-08T00:48:57,172 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0f983e3e5be1,46417,1733618936694] 2024-12-08T00:48:57,176 INFO [RS:0;0f983e3e5be1:46417 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T00:48:57,177 INFO [RS:0;0f983e3e5be1:46417 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T00:48:57,177 INFO [RS:0;0f983e3e5be1:46417 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T00:48:57,177 INFO [RS:0;0f983e3e5be1:46417 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
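The MemStoreFlusher line above fixes two numbers that follow directly from the heap-fraction settings: 836 M is 95% of the 880 M global limit, and 880 M itself corresponds to roughly a 2.2 GB heap at a 0.4 fraction. The fractions are the commonly documented defaults for hbase.regionserver.global.memstore.size and hbase.regionserver.global.memstore.size.lower.limit and are an assumption here, since the test's own configuration is not shown.

    // Sketch of the arithmetic behind "globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M".
    // The 0.4 and 0.95 fractions are assumed defaults (see lead-in); only the 880 -> 836 step
    // is directly confirmed by the log line above.
    public final class MemStoreLimitSketch {
      public static void main(String[] args) {
        double heapMb = 2200.0;                  // implied heap size at a 0.4 global memstore fraction
        double globalLimitMb = heapMb * 0.4;     // upper mark: updates are blocked when usage reaches it
        double lowMarkMb = globalLimitMb * 0.95; // flushes aim to bring usage back under this mark
        System.out.printf("limit=%.0f M, lowMark=%.0f M%n", globalLimitMb, lowMarkMb); // 880 M, 836 M
      }
    }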
2024-12-08T00:48:57,178 INFO [RS:0;0f983e3e5be1:46417 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T00:48:57,178 INFO [RS:0;0f983e3e5be1:46417 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T00:48:57,178 INFO [RS:0;0f983e3e5be1:46417 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:57,179 DEBUG [RS:0;0f983e3e5be1:46417 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:57,179 DEBUG [RS:0;0f983e3e5be1:46417 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:57,179 DEBUG [RS:0;0f983e3e5be1:46417 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:57,179 DEBUG [RS:0;0f983e3e5be1:46417 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:57,179 DEBUG [RS:0;0f983e3e5be1:46417 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:57,179 DEBUG [RS:0;0f983e3e5be1:46417 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0f983e3e5be1:0, corePoolSize=2, maxPoolSize=2 2024-12-08T00:48:57,179 DEBUG [RS:0;0f983e3e5be1:46417 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:57,179 DEBUG [RS:0;0f983e3e5be1:46417 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:57,179 DEBUG [RS:0;0f983e3e5be1:46417 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:57,179 DEBUG [RS:0;0f983e3e5be1:46417 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:57,179 DEBUG [RS:0;0f983e3e5be1:46417 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:57,179 DEBUG [RS:0;0f983e3e5be1:46417 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:48:57,179 DEBUG [RS:0;0f983e3e5be1:46417 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:48:57,179 DEBUG [RS:0;0f983e3e5be1:46417 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:48:57,180 INFO [RS:0;0f983e3e5be1:46417 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
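The executor-service burst above follows one fixed message shape (name, corePoolSize, maxPoolSize), which makes it easy to tabulate region-server thread-pool sizing straight from the log. A minimal parsing sketch; the regular expression is an assumption about this particular message format and is not an HBase API.

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Log-reading aid only: extracts pool sizes from the "Starting executor service" lines above.
    public final class ExecutorPoolSizes {
      private static final Pattern LINE = Pattern.compile(
          "Starting executor service name=([^,]+), corePoolSize=(\\d+), maxPoolSize=(\\d+)");

      public static void main(String[] args) {
        String sample = "executor.ExecutorService(95): Starting executor service "
            + "name=RS_OPEN_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1";
        Matcher m = LINE.matcher(sample);
        if (m.find()) {
          System.out.printf("%s -> core=%s, max=%s%n", m.group(1), m.group(2), m.group(3));
        }
      }
    }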
2024-12-08T00:48:57,180 INFO [RS:0;0f983e3e5be1:46417 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:57,180 INFO [RS:0;0f983e3e5be1:46417 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:57,180 INFO [RS:0;0f983e3e5be1:46417 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:57,180 INFO [RS:0;0f983e3e5be1:46417 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:57,180 INFO [RS:0;0f983e3e5be1:46417 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,46417,1733618936694-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T00:48:57,192 INFO [RS:0;0f983e3e5be1:46417 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T00:48:57,192 INFO [RS:0;0f983e3e5be1:46417 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,46417,1733618936694-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:57,192 INFO [RS:0;0f983e3e5be1:46417 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:57,192 INFO [RS:0;0f983e3e5be1:46417 {}] regionserver.Replication(171): 0f983e3e5be1,46417,1733618936694 started 2024-12-08T00:48:57,203 INFO [RS:0;0f983e3e5be1:46417 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:57,203 INFO [RS:0;0f983e3e5be1:46417 {}] regionserver.HRegionServer(1482): Serving as 0f983e3e5be1,46417,1733618936694, RpcServer on 0f983e3e5be1/172.17.0.2:46417, sessionid=0x10002f3693d0001 2024-12-08T00:48:57,203 DEBUG [RS:0;0f983e3e5be1:46417 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T00:48:57,203 DEBUG [RS:0;0f983e3e5be1:46417 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0f983e3e5be1,46417,1733618936694 2024-12-08T00:48:57,203 DEBUG [RS:0;0f983e3e5be1:46417 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,46417,1733618936694' 2024-12-08T00:48:57,203 DEBUG [RS:0;0f983e3e5be1:46417 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T00:48:57,203 DEBUG [RS:0;0f983e3e5be1:46417 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T00:48:57,204 DEBUG [RS:0;0f983e3e5be1:46417 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T00:48:57,204 DEBUG [RS:0;0f983e3e5be1:46417 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T00:48:57,204 DEBUG [RS:0;0f983e3e5be1:46417 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0f983e3e5be1,46417,1733618936694 2024-12-08T00:48:57,204 DEBUG [RS:0;0f983e3e5be1:46417 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,46417,1733618936694' 2024-12-08T00:48:57,204 DEBUG [RS:0;0f983e3e5be1:46417 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T00:48:57,204 DEBUG 
[RS:0;0f983e3e5be1:46417 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T00:48:57,205 DEBUG [RS:0;0f983e3e5be1:46417 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T00:48:57,205 INFO [RS:0;0f983e3e5be1:46417 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T00:48:57,205 INFO [RS:0;0f983e3e5be1:46417 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T00:48:57,210 WARN [0f983e3e5be1:40363 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-08T00:48:57,308 INFO [RS:0;0f983e3e5be1:46417 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C46417%2C1733618936694, suffix=, logDir=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694, archiveDir=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/oldWALs, maxLogs=32 2024-12-08T00:48:57,310 INFO [RS:0;0f983e3e5be1:46417 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C46417%2C1733618936694.1733618937310 2024-12-08T00:48:57,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:48:57,320 INFO [RS:0;0f983e3e5be1:46417 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618937310 2024-12-08T00:48:57,325 DEBUG [RS:0;0f983e3e5be1:46417 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33267:33267),(127.0.0.1/127.0.0.1:39401:39401)] 2024-12-08T00:48:57,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:48:57,460 DEBUG [0f983e3e5be1:40363 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-08T00:48:57,461 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0f983e3e5be1,46417,1733618936694 2024-12-08T00:48:57,464 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0f983e3e5be1,46417,1733618936694, state=OPENING 2024-12-08T00:48:57,497 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T00:48:57,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:57,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46417-0x10002f3693d0001, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:48:57,507 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T00:48:57,507 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:48:57,507 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:48:57,507 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0f983e3e5be1,46417,1733618936694}] 2024-12-08T00:48:57,665 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T00:48:57,670 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38217, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T00:48:57,676 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-08T00:48:57,676 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:48:57,678 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C46417%2C1733618936694.meta, suffix=.meta, logDir=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694, archiveDir=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/oldWALs, maxLogs=32 2024-12-08T00:48:57,679 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C46417%2C1733618936694.meta.1733618937679.meta 2024-12-08T00:48:57,684 INFO 
[RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.meta.1733618937679.meta 2024-12-08T00:48:57,685 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33267:33267),(127.0.0.1/127.0.0.1:39401:39401)] 2024-12-08T00:48:57,686 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:48:57,687 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T00:48:57,687 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T00:48:57,687 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-08T00:48:57,687 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T00:48:57,687 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:48:57,687 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-08T00:48:57,687 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-08T00:48:57,691 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T00:48:57,692 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T00:48:57,692 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:57,693 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:48:57,693 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T00:48:57,694 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T00:48:57,694 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:57,694 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:48:57,694 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T00:48:57,695 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T00:48:57,695 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:57,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:48:57,696 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T00:48:57,697 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T00:48:57,697 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:57,698 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:48:57,698 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T00:48:57,699 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/hbase/meta/1588230740 2024-12-08T00:48:57,700 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/hbase/meta/1588230740 2024-12-08T00:48:57,701 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T00:48:57,701 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T00:48:57,701 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
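The HStore lines above repeat the same per-family block-cache flags for every store of hbase:meta (cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, prefetchOnOpen=false). Purely as an illustrative sketch, not code from this test, those flags correspond to settings on the public ColumnFamilyDescriptorBuilder API; the class name and the choice of the 'info' family are assumptions made for the example.

```java
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CacheFlagsSketch {
  public static void main(String[] args) {
    // Mirrors the flags reported by HStore(400) above; hypothetical example only.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setBlockCacheEnabled(true)       // cacheDataOnRead
        .setCacheDataOnWrite(false)
        .setCacheIndexesOnWrite(false)
        .setCacheBloomsOnWrite(false)
        .setEvictBlocksOnClose(false)
        .setPrefetchBlocksOnOpen(false)
        .build();
    System.out.println(info);
  }
}
```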
2024-12-08T00:48:57,702 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T00:48:57,703 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=695119, jitterRate=-0.11611148715019226}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T00:48:57,703 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-08T00:48:57,704 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733618937687Writing region info on filesystem at 1733618937687Initializing all the Stores at 1733618937688 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618937688Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618937691 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618937691Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618937691Cleaning up temporary data from old regions at 1733618937701 (+10 ms)Running coprocessor post-open hooks at 1733618937703 (+2 ms)Region opened successfully at 1733618937704 (+1 ms) 2024-12-08T00:48:57,705 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733618937664 2024-12-08T00:48:57,707 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T00:48:57,707 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-08T00:48:57,708 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=0f983e3e5be1,46417,1733618936694 2024-12-08T00:48:57,709 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0f983e3e5be1,46417,1733618936694, state=OPEN 2024-12-08T00:48:57,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T00:48:57,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46417-0x10002f3693d0001, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T00:48:57,746 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0f983e3e5be1,46417,1733618936694 2024-12-08T00:48:57,746 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:48:57,746 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:48:57,752 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T00:48:57,752 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0f983e3e5be1,46417,1733618936694 in 239 msec 2024-12-08T00:48:57,756 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T00:48:57,756 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 694 msec 2024-12-08T00:48:57,757 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:48:57,757 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-08T00:48:57,759 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T00:48:57,759 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0f983e3e5be1,46417,1733618936694, seqNum=-1] 2024-12-08T00:48:57,760 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:48:57,761 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60057, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:48:57,767 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 749 msec 2024-12-08T00:48:57,768 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733618937767, completionTime=-1 2024-12-08T00:48:57,768 INFO 
[master/0f983e3e5be1:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-08T00:48:57,768 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-08T00:48:57,769 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-08T00:48:57,769 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733618997769 2024-12-08T00:48:57,769 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733619057769 2024-12-08T00:48:57,769 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-08T00:48:57,770 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,40363,1733618936495-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:57,770 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,40363,1733618936495-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:57,770 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,40363,1733618936495-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:57,770 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0f983e3e5be1:40363, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:57,770 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:57,770 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:57,771 DEBUG [master/0f983e3e5be1:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T00:48:57,773 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.013sec 2024-12-08T00:48:57,773 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T00:48:57,773 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T00:48:57,773 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T00:48:57,773 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
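InitMetaProcedure above creates the 'default' and 'hbase' namespaces just before the master reports that initialization completed in 1.013sec. As a hedged sketch only (not part of the test), a client could confirm those namespaces through the standard Admin API; the class name is invented and the Configuration is assumed to point at this mini cluster.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListNamespacesSketch {
  public static void main(String[] args) throws Exception {
    // Assumes hbase-site.xml on the classpath points at the running cluster.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // After InitMetaProcedure, 'default' and 'hbase' should both be listed.
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println("namespace: " + ns.getName());
      }
    }
  }
}
```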
2024-12-08T00:48:57,774 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T00:48:57,774 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,40363,1733618936495-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T00:48:57,774 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,40363,1733618936495-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T00:48:57,776 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T00:48:57,776 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T00:48:57,776 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,40363,1733618936495-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:48:57,817 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73751e94, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:48:57,817 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0f983e3e5be1,40363,-1 for getting cluster id 2024-12-08T00:48:57,817 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T00:48:57,818 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2a8ba3d6-ecf1-4da4-98d6-5488508cf31b' 2024-12-08T00:48:57,819 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T00:48:57,819 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2a8ba3d6-ecf1-4da4-98d6-5488508cf31b" 2024-12-08T00:48:57,819 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57e4969f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:48:57,819 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0f983e3e5be1,40363,-1] 2024-12-08T00:48:57,819 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T00:48:57,819 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:48:57,821 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32786, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T00:48:57,821 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@449a1c0e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:48:57,822 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T00:48:57,822 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0f983e3e5be1,46417,1733618936694, seqNum=-1] 2024-12-08T00:48:57,823 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:48:57,824 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50674, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:48:57,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0f983e3e5be1,40363,1733618936495 2024-12-08T00:48:57,826 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:48:57,828 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-08T00:48:57,828 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-12-08T00:48:57,828 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-12-08T00:48:57,829 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-08T00:48:57,829 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 0f983e3e5be1,40363,1733618936495 2024-12-08T00:48:57,829 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2c4b315c 2024-12-08T00:48:57,830 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T00:48:57,831 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32800, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T00:48:57,832 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40363 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-08T00:48:57,832 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40363 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
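The two TableDescriptorChecker warnings above flag MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes) as unusually small; tiny values like these are presumably deliberate so the test flushes and rolls quickly. The sketch below only illustrates how such limits could be expressed on a table descriptor with TableDescriptorBuilder; in this run they may equally come from 'hbase.hregion.max.filesize' and 'hbase.hregion.memstore.flush.size' in the test configuration, and the class name is hypothetical.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallLimitsSketch {
  public static void main(String[] args) {
    // Values taken from the warnings above; deliberately tiny for the example.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
        .setMaxFileSize(786432L)        // MAX_FILESIZE flagged as "too small"
        .setMemStoreFlushSize(8192L)    // MEMSTORE_FLUSHSIZE flagged as "too small"
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .build();
    System.out.println(td);
  }
}
```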
2024-12-08T00:48:57,832 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40363 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T00:48:57,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40363 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-12-08T00:48:57,835 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T00:48:57,835 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:57,835 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40363 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-12-08T00:48:57,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40363 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T00:48:57,837 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T00:48:57,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43949 is added to blk_1073741835_1011 (size=395) 2024-12-08T00:48:57,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33349 is added to blk_1073741835_1011 (size=395) 2024-12-08T00:48:57,845 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ffc8037c0d95653204c1d5fe84f159d2, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733618937831.ffc8037c0d95653204c1d5fe84f159d2.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a 2024-12-08T00:48:57,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33349 is added to blk_1073741836_1012 (size=78) 2024-12-08T00:48:57,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43949 is added to blk_1073741836_1012 (size=78) 2024-12-08T00:48:57,851 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733618937831.ffc8037c0d95653204c1d5fe84f159d2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:48:57,851 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing ffc8037c0d95653204c1d5fe84f159d2, disabling compactions & flushes 2024-12-08T00:48:57,851 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733618937831.ffc8037c0d95653204c1d5fe84f159d2. 2024-12-08T00:48:57,851 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733618937831.ffc8037c0d95653204c1d5fe84f159d2. 2024-12-08T00:48:57,851 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733618937831.ffc8037c0d95653204c1d5fe84f159d2. after waiting 0 ms 2024-12-08T00:48:57,851 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733618937831.ffc8037c0d95653204c1d5fe84f159d2. 2024-12-08T00:48:57,851 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733618937831.ffc8037c0d95653204c1d5fe84f159d2. 2024-12-08T00:48:57,852 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for ffc8037c0d95653204c1d5fe84f159d2: Waiting for close lock at 1733618937851Disabling compacts and flushes for region at 1733618937851Disabling writes for close at 1733618937851Writing region close event to WAL at 1733618937851Closed at 1733618937851 2024-12-08T00:48:57,853 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T00:48:57,853 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1733618937831.ffc8037c0d95653204c1d5fe84f159d2.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1733618937853"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733618937853"}]},"ts":"1733618937853"} 2024-12-08T00:48:57,856 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
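MetaTableAccessor above writes a Put to hbase:meta carrying 'regioninfo' and 'state' cells for the new region's row, then reports that one region was added. Just as an illustrative sketch under the assumption of a working client connection (not code from this test), those cells can be read back with an ordinary scan of hbase:meta; the prefix check here is a plain string comparison rather than a server-side filter, and the class name is made up.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadMetaRowSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumed to reach the mini cluster
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table meta = conn.getTable(TableName.META_TABLE_NAME);
         ResultScanner rs = meta.getScanner(new Scan())) {
      for (Result r : rs) {
        String row = Bytes.toStringBinary(r.getRow());
        // hbase:meta rows for a user table start with "<tableName>,".
        if (row.startsWith("TestLogRolling-testLogRollOnPipelineRestart,")) {
          byte[] state = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"));
          System.out.println(row + " state=" + (state == null ? "<none>" : Bytes.toString(state)));
        }
      }
    }
  }
}
```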
2024-12-08T00:48:57,857 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T00:48:57,857 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733618937857"}]},"ts":"1733618937857"} 2024-12-08T00:48:57,859 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-12-08T00:48:57,860 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=ffc8037c0d95653204c1d5fe84f159d2, ASSIGN}] 2024-12-08T00:48:57,861 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=ffc8037c0d95653204c1d5fe84f159d2, ASSIGN 2024-12-08T00:48:57,862 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=ffc8037c0d95653204c1d5fe84f159d2, ASSIGN; state=OFFLINE, location=0f983e3e5be1,46417,1733618936694; forceNewPlan=false, retain=false 2024-12-08T00:48:58,013 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ffc8037c0d95653204c1d5fe84f159d2, regionState=OPENING, regionLocation=0f983e3e5be1,46417,1733618936694 2024-12-08T00:48:58,016 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=ffc8037c0d95653204c1d5fe84f159d2, ASSIGN because future has completed 2024-12-08T00:48:58,016 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ffc8037c0d95653204c1d5fe84f159d2, server=0f983e3e5be1,46417,1733618936694}] 2024-12-08T00:48:58,172 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1733618937831.ffc8037c0d95653204c1d5fe84f159d2. 
2024-12-08T00:48:58,173 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => ffc8037c0d95653204c1d5fe84f159d2, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733618937831.ffc8037c0d95653204c1d5fe84f159d2.', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:48:58,173 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart ffc8037c0d95653204c1d5fe84f159d2 2024-12-08T00:48:58,173 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733618937831.ffc8037c0d95653204c1d5fe84f159d2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:48:58,173 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for ffc8037c0d95653204c1d5fe84f159d2 2024-12-08T00:48:58,173 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for ffc8037c0d95653204c1d5fe84f159d2 2024-12-08T00:48:58,175 INFO [StoreOpener-ffc8037c0d95653204c1d5fe84f159d2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ffc8037c0d95653204c1d5fe84f159d2 2024-12-08T00:48:58,176 INFO [StoreOpener-ffc8037c0d95653204c1d5fe84f159d2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ffc8037c0d95653204c1d5fe84f159d2 columnFamilyName info 2024-12-08T00:48:58,176 DEBUG [StoreOpener-ffc8037c0d95653204c1d5fe84f159d2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:48:58,177 INFO [StoreOpener-ffc8037c0d95653204c1d5fe84f159d2-1 {}] regionserver.HStore(327): Store=ffc8037c0d95653204c1d5fe84f159d2/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:48:58,177 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for ffc8037c0d95653204c1d5fe84f159d2 2024-12-08T00:48:58,178 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/default/TestLogRolling-testLogRollOnPipelineRestart/ffc8037c0d95653204c1d5fe84f159d2 2024-12-08T00:48:58,178 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/default/TestLogRolling-testLogRollOnPipelineRestart/ffc8037c0d95653204c1d5fe84f159d2 2024-12-08T00:48:58,179 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for ffc8037c0d95653204c1d5fe84f159d2 2024-12-08T00:48:58,179 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for ffc8037c0d95653204c1d5fe84f159d2 2024-12-08T00:48:58,181 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for ffc8037c0d95653204c1d5fe84f159d2 2024-12-08T00:48:58,184 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/default/TestLogRolling-testLogRollOnPipelineRestart/ffc8037c0d95653204c1d5fe84f159d2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:48:58,185 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened ffc8037c0d95653204c1d5fe84f159d2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=711961, jitterRate=-0.09469574689865112}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T00:48:58,185 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ffc8037c0d95653204c1d5fe84f159d2 2024-12-08T00:48:58,186 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for ffc8037c0d95653204c1d5fe84f159d2: Running coprocessor pre-open hook at 1733618938173Writing region info on filesystem at 1733618938173Initializing all the Stores at 1733618938174 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618938174Cleaning up temporary data from old regions at 1733618938179 (+5 ms)Running coprocessor post-open hooks at 1733618938185 (+6 ms)Region opened successfully at 1733618938186 (+1 ms) 2024-12-08T00:48:58,188 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1733618937831.ffc8037c0d95653204c1d5fe84f159d2., pid=6, masterSystemTime=1733618938169 2024-12-08T00:48:58,190 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1733618937831.ffc8037c0d95653204c1d5fe84f159d2. 2024-12-08T00:48:58,191 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1733618937831.ffc8037c0d95653204c1d5fe84f159d2. 2024-12-08T00:48:58,192 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ffc8037c0d95653204c1d5fe84f159d2, regionState=OPEN, openSeqNum=2, regionLocation=0f983e3e5be1,46417,1733618936694 2024-12-08T00:48:58,194 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ffc8037c0d95653204c1d5fe84f159d2, server=0f983e3e5be1,46417,1733618936694 because future has completed 2024-12-08T00:48:58,198 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-08T00:48:58,198 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure ffc8037c0d95653204c1d5fe84f159d2, server=0f983e3e5be1,46417,1733618936694 in 180 msec 2024-12-08T00:48:58,201 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-08T00:48:58,201 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=ffc8037c0d95653204c1d5fe84f159d2, ASSIGN in 338 msec 2024-12-08T00:48:58,202 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T00:48:58,202 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733618938202"}]},"ts":"1733618938202"} 2024-12-08T00:48:58,204 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-12-08T00:48:58,205 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T00:48:58,207 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 373 msec 2024-12-08T00:48:58,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:48:58,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:48:59,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:48:59,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:00,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:00,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:01,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:01,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:01,419 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T00:49:01,439 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:01,439 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:01,439 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:01,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:01,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:01,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:01,442 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:01,443 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:01,443 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:01,445 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:02,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:02,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:03,176 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-08T00:49:03,179 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-12-08T00:49:03,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:03,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:04,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:04,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:05,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:05,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:05,912 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-08T00:49:05,912 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-08T00:49:05,913 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-08T00:49:05,914 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-12-08T00:49:05,915 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T00:49:05,915 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-08T00:49:05,915 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-08T00:49:05,915 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-08T00:49:06,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:06,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:49:07,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:07,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:07,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40363 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T00:49:07,897 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-12-08T00:49:07,897 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-12-08T00:49:07,904 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-12-08T00:49:07,904 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1733618937831.ffc8037c0d95653204c1d5fe84f159d2. 2024-12-08T00:49:07,909 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1733618937831.ffc8037c0d95653204c1d5fe84f159d2., hostname=0f983e3e5be1,46417,1733618936694, seqNum=2] 2024-12-08T00:49:08,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:08,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:09,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:09,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:09,912 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618937310 2024-12-08T00:49:09,912 WARN [ResponseProcessor for block BP-2043044047-172.17.0.2-1733618934874:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2043044047-172.17.0.2-1733618934874:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:49:09,912 WARN [ResponseProcessor for block BP-2043044047-172.17.0.2-1733618934874:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2043044047-172.17.0.2-1733618934874:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T00:49:09,913 WARN [ResponseProcessor for block BP-2043044047-172.17.0.2-1733618934874:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2043044047-172.17.0.2-1733618934874:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-2043044047-172.17.0.2-1733618934874:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:43949,DS-58b1bf7e-5c6e-4739-b326-ae88a0799027,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:49:09,913 WARN [DataStreamer for file /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618937310 block BP-2043044047-172.17.0.2-1733618934874:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2043044047-172.17.0.2-1733618934874:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43949,DS-58b1bf7e-5c6e-4739-b326-ae88a0799027,DISK], DatanodeInfoWithStorage[127.0.0.1:33349,DS-3e53e221-56b1-47c9-a3fe-5e8efbec7405,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43949,DS-58b1bf7e-5c6e-4739-b326-ae88a0799027,DISK]) is bad. 2024-12-08T00:49:09,913 WARN [DataStreamer for file /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/WALs/0f983e3e5be1,40363,1733618936495/0f983e3e5be1%2C40363%2C1733618936495.1733618936831 block BP-2043044047-172.17.0.2-1733618934874:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2043044047-172.17.0.2-1733618934874:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33349,DS-3e53e221-56b1-47c9-a3fe-5e8efbec7405,DISK], DatanodeInfoWithStorage[127.0.0.1:43949,DS-58b1bf7e-5c6e-4739-b326-ae88a0799027,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43949,DS-58b1bf7e-5c6e-4739-b326-ae88a0799027,DISK]) is bad. 2024-12-08T00:49:09,913 WARN [DataStreamer for file /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.meta.1733618937679.meta block BP-2043044047-172.17.0.2-1733618934874:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2043044047-172.17.0.2-1733618934874:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43949,DS-58b1bf7e-5c6e-4739-b326-ae88a0799027,DISK], DatanodeInfoWithStorage[127.0.0.1:33349,DS-3e53e221-56b1-47c9-a3fe-5e8efbec7405,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43949,DS-58b1bf7e-5c6e-4739-b326-ae88a0799027,DISK]) is bad. 2024-12-08T00:49:09,913 WARN [PacketResponder: BP-2043044047-172.17.0.2-1733618934874:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43949] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:49:09,913 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_389193149_22 at /127.0.0.1:42262 [Receiving block BP-2043044047-172.17.0.2-1733618934874:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43949:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42262 dst: /127.0.0.1:43949 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:49:09,913 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1513898204_22 at /127.0.0.1:52978 [Receiving block BP-2043044047-172.17.0.2-1733618934874:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33349:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52978 dst: /127.0.0.1:33349 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:49:09,913 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_389193149_22 at /127.0.0.1:42264 [Receiving block BP-2043044047-172.17.0.2-1733618934874:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43949:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42264 dst: /127.0.0.1:43949 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:49:09,914 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1513898204_22 at /127.0.0.1:42220 [Receiving block BP-2043044047-172.17.0.2-1733618934874:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43949:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42220 dst: /127.0.0.1:43949 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:49:09,914 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_389193149_22 at /127.0.0.1:53002 [Receiving block BP-2043044047-172.17.0.2-1733618934874:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33349:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53002 dst: /127.0.0.1:33349 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:49:09,914 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_389193149_22 at /127.0.0.1:52988 [Receiving block BP-2043044047-172.17.0.2-1733618934874:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33349:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52988 dst: /127.0.0.1:33349 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:49:09,976 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7098476e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:49:09,977 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2c66348c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:49:09,977 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:49:09,977 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3df67424{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:49:09,978 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@124b3c60{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/hadoop.log.dir/,STOPPED} 2024-12-08T00:49:09,981 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-08T00:49:09,982 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:49:09,982 WARN [BP-2043044047-172.17.0.2-1733618934874 heartbeating to localhost/127.0.0.1:37255 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:49:09,982 WARN [BP-2043044047-172.17.0.2-1733618934874 heartbeating to localhost/127.0.0.1:37255 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2043044047-172.17.0.2-1733618934874 (Datanode Uuid 5584fa3b-e14c-4e79-ba2e-d4b3219a978d) service to localhost/127.0.0.1:37255 2024-12-08T00:49:09,982 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/cluster_c1909289-147f-3345-59fd-781f6482af86/data/data3/current/BP-2043044047-172.17.0.2-1733618934874 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:49:09,982 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/cluster_c1909289-147f-3345-59fd-781f6482af86/data/data4/current/BP-2043044047-172.17.0.2-1733618934874 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:49:09,983 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:49:09,991 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:49:09,994 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:49:09,995 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:49:09,995 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:49:09,995 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T00:49:09,996 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@920ba2b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:49:09,996 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c15ccec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:49:10,086 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5fb420d9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/java.io.tmpdir/jetty-localhost-45441-hadoop-hdfs-3_4_1-tests_jar-_-any-8939487281140729804/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:49:10,086 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@140d49ad{HTTP/1.1, (http/1.1)}{localhost:45441} 2024-12-08T00:49:10,086 INFO [Time-limited test {}] server.Server(415): Started @169516ms 2024-12-08T00:49:10,087 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T00:49:10,103 WARN [ResponseProcessor for block BP-2043044047-172.17.0.2-1733618934874:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2043044047-172.17.0.2-1733618934874:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:49:10,103 WARN [ResponseProcessor for block BP-2043044047-172.17.0.2-1733618934874:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2043044047-172.17.0.2-1733618934874:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:49:10,103 WARN [ResponseProcessor for block BP-2043044047-172.17.0.2-1733618934874:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2043044047-172.17.0.2-1733618934874:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:49:10,104 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_389193149_22 at /127.0.0.1:44524 [Receiving block BP-2043044047-172.17.0.2-1733618934874:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33349:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44524 dst: /127.0.0.1:33349 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
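[Editor's note] While the datanode's Jetty HTTP server is being brought back up, AuthenticationFilter warns that it could not read the signature secret file and falls back to random secrets. A minimal sketch of supplying that secret follows; the temporary file path is an assumption used purely for illustration.

```java
import java.nio.file.Files;
import java.nio.file.Path;
import org.apache.hadoop.conf.Configuration;

// Minimal sketch, not taken from the test: provide a signer secret file so the
// HTTP servers do not fall back to random secrets as in the WARN above.
public class HttpAuthSecretExample {
  public static void main(String[] args) throws Exception {
    Path secret = Files.createTempFile("hadoop-http-auth-signature-secret", ".secret");
    Files.writeString(secret, "not-a-real-secret");

    Configuration conf = new Configuration();
    conf.set("hadoop.http.authentication.signature.secret.file", secret.toString());
    System.out.println("signer secret file: "
        + conf.get("hadoop.http.authentication.signature.secret.file"));
  }
}
```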
2024-12-08T00:49:10,104 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_389193149_22 at /127.0.0.1:44536 [Receiving block BP-2043044047-172.17.0.2-1733618934874:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33349:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44536 dst: /127.0.0.1:33349 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:49:10,104 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1513898204_22 at /127.0.0.1:44550 [Receiving block BP-2043044047-172.17.0.2-1733618934874:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33349:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44550 dst: /127.0.0.1:33349 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:49:10,107 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@108cc47d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:49:10,107 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@72f811fc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:49:10,107 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:49:10,107 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@63477bff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:49:10,107 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7d5b7744{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/hadoop.log.dir/,STOPPED} 2024-12-08T00:49:10,109 WARN [BP-2043044047-172.17.0.2-1733618934874 heartbeating to localhost/127.0.0.1:37255 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:49:10,109 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
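[Editor's note] The shutdown records above (command processor interrupted, block pool service ending, refreshUsed threads interrupted) reflect the test bouncing datanodes underneath live WAL writers. The sketch below is an assumption about the harness, not code from TestLogRolling itself: it shows the general MiniDFSCluster stop/restart pattern that produces this kind of output.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Rough sketch of a datanode bounce like the one reflected in the surrounding
// records (block pool service ending, then a new Jetty instance starting).
public class DataNodeRestartSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
      cluster.waitActive();
      // Stop one datanode, keeping its storage directories, then bring it back.
      MiniDFSCluster.DataNodeProperties dn = cluster.stopDataNode(0);
      cluster.restartDataNode(dn);
      cluster.waitActive();
    } finally {
      cluster.shutdown();
    }
  }
}
```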
2024-12-08T00:49:10,109 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:49:10,109 WARN [BP-2043044047-172.17.0.2-1733618934874 heartbeating to localhost/127.0.0.1:37255 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2043044047-172.17.0.2-1733618934874 (Datanode Uuid 79655df2-a483-4591-9bcf-bcb8d8bbb0fa) service to localhost/127.0.0.1:37255 2024-12-08T00:49:10,109 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/cluster_c1909289-147f-3345-59fd-781f6482af86/data/data1/current/BP-2043044047-172.17.0.2-1733618934874 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:49:10,109 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/cluster_c1909289-147f-3345-59fd-781f6482af86/data/data2/current/BP-2043044047-172.17.0.2-1733618934874 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:49:10,109 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:49:10,115 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:49:10,118 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:49:10,119 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:49:10,119 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:49:10,119 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T00:49:10,119 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@431ff48f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:49:10,119 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5cc90b47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:49:10,208 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@89d2ae2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/java.io.tmpdir/jetty-localhost-34299-hadoop-hdfs-3_4_1-tests_jar-_-any-785174033351820736/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:49:10,208 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2ad33209{HTTP/1.1, 
(http/1.1)}{localhost:34299} 2024-12-08T00:49:10,208 INFO [Time-limited test {}] server.Server(415): Started @169638ms 2024-12-08T00:49:10,209 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T00:49:10,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:10,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:10,462 WARN [Thread-1329 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T00:49:10,464 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x457982b596bcf258 with lease ID 0x1d416edf38cf9171: from storage DS-58b1bf7e-5c6e-4739-b326-ae88a0799027 node DatanodeRegistration(127.0.0.1:39279, datanodeUuid=5584fa3b-e14c-4e79-ba2e-d4b3219a978d, infoPort=37273, infoSecurePort=0, ipcPort=36623, storageInfo=lv=-57;cid=testClusterID;nsid=28432284;c=1733618934874), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:49:10,465 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x457982b596bcf258 with lease ID 0x1d416edf38cf9171: from storage DS-54b21332-71bc-4a57-89dc-03ebbff72250 node DatanodeRegistration(127.0.0.1:39279, datanodeUuid=5584fa3b-e14c-4e79-ba2e-d4b3219a978d, infoPort=37273, infoSecurePort=0, ipcPort=36623, storageInfo=lv=-57;cid=testClusterID;nsid=28432284;c=1733618934874), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:49:10,591 WARN [Thread-1349 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T00:49:10,594 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc199fbc696661e0e with lease ID 0x1d416edf38cf9172: from storage DS-3e53e221-56b1-47c9-a3fe-5e8efbec7405 node DatanodeRegistration(127.0.0.1:33061, datanodeUuid=79655df2-a483-4591-9bcf-bcb8d8bbb0fa, infoPort=38777, infoSecurePort=0, ipcPort=44399, storageInfo=lv=-57;cid=testClusterID;nsid=28432284;c=1733618934874), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:49:10,594 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc199fbc696661e0e with lease ID 0x1d416edf38cf9172: from storage DS-94ffcfad-c88a-4f09-8130-3e8660bb6f6a node DatanodeRegistration(127.0.0.1:33061, datanodeUuid=79655df2-a483-4591-9bcf-bcb8d8bbb0fa, infoPort=38777, infoSecurePort=0, ipcPort=44399, storageInfo=lv=-57;cid=testClusterID;nsid=28432284;c=1733618934874), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:49:11,234 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-12-08T00:49:11,240 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-08T00:49:11,243 ERROR [FSHLog-0-hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a-prefix:0f983e3e5be1,46417,1733618936694 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33349,DS-3e53e221-56b1-47c9-a3fe-5e8efbec7405,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:49:11,243 WARN [FSHLog-0-hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a-prefix:0f983e3e5be1,46417,1733618936694 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33349,DS-3e53e221-56b1-47c9-a3fe-5e8efbec7405,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
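[Editor's note] "Validated row row1002" above is the test reading a row back once the restarted datanodes have re-registered, immediately before the append on the old pipeline fails. The sketch below shows a generic get-based validation of that shape; the table name and column family/qualifier are assumptions, since the test's schema is not visible in this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative "validate row" read of the kind logged above. Table and column
// names are assumptions for the sketch.
public class ValidateRowSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testLogRolling"))) {
      Result r = table.get(new Get(Bytes.toBytes("row1002")));
      byte[] value = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("q"));
      if (value == null) {
        throw new IllegalStateException("row1002 not found after datanode restart");
      }
      System.out.println("row1002 = " + Bytes.toString(value));
    }
  }
}
```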
2024-12-08T00:49:11,243 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0f983e3e5be1%2C46417%2C1733618936694:(num 1733618937310) roll requested 2024-12-08T00:49:11,243 INFO [regionserver/0f983e3e5be1:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C46417%2C1733618936694.1733618951243 2024-12-08T00:49:11,249 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618937310 newFile=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618951243 2024-12-08T00:49:11,249 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:11,249 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:11,249 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:11,249 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:11,249 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:11,250 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618937310 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618951243 2024-12-08T00:49:11,250 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33349,DS-3e53e221-56b1-47c9-a3fe-5e8efbec7405,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:49:11,250 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33349,DS-3e53e221-56b1-47c9-a3fe-5e8efbec7405,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
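[Editor's note] The roll above is initiated internally by the region server's log roller after the append failure; the old writer's trailer cannot be written and closing it fails, which is non-fatal. For reference, the same effect can be requested through the public Admin API, as sketched below; the server name components are assumptions, not values from this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch only: explicitly ask a region server to close its current WAL writer
// and start a new file, the public equivalent of the roll logged above.
public class RollWalSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ServerName rs = ServerName.valueOf("regionserver.example", 16020, System.currentTimeMillis());
      admin.rollWALWriter(rs);
    }
  }
}
```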
2024-12-08T00:49:11,250 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618937310 2024-12-08T00:49:11,251 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38777:38777),(127.0.0.1/127.0.0.1:37273:37273)] 2024-12-08T00:49:11,251 WARN [IPC Server handler 0 on default port 37255 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618937310 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-12-08T00:49:11,251 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618937310 is not closed yet, will try archiving it next time 2024-12-08T00:49:11,251 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618937310 after 1ms 2024-12-08T00:49:11,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:11,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:12,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:12,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:13,254 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-12-08T00:49:13,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:13,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:13,466 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
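[Editor's note] The repeated "Failed invocation ... Filesystem closed" warnings above come from lease recovery being retried against a DFSClient that has already been shut down, while the recovery on the live cluster succeeds on attempt=1 roughly four seconds after attempt=0 fails. The sketch below is a simplified version of the recoverLease/isFileClosed polling that RecoverLeaseFSUtils performs on the old WAL; the NameNode URI, file path, timeout, and poll interval are assumptions for illustration.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Simplified recoverLease/isFileClosed polling loop in the spirit of
// RecoverLeaseFSUtils: trigger recovery, then poll until the file is closed.
public class LeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path wal = new Path("/user/jenkins/WALs/example-wal-file");
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      long deadline = System.currentTimeMillis() + 60_000L;
      boolean recovered = dfs.recoverLease(wal); // attempt=0 often returns false
      while (!recovered && System.currentTimeMillis() < deadline) {
        Thread.sleep(4_000L); // back off before re-checking
        // isFileClosed() avoids re-triggering recovery when it is already in progress.
        recovered = dfs.isFileClosed(wal) || dfs.recoverLease(wal);
      }
      System.out.println("lease recovered: " + recovered);
    }
  }
}
```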
2024-12-08T00:49:14,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:14,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:15,253 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618937310 after 4002ms 2024-12-08T00:49:15,257 WARN [ResponseProcessor for block BP-2043044047-172.17.0.2-1733618934874:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2043044047-172.17.0.2-1733618934874:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:49:15,258 WARN [DataStreamer for file /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618951243 block BP-2043044047-172.17.0.2-1733618934874:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2043044047-172.17.0.2-1733618934874:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33061,DS-3e53e221-56b1-47c9-a3fe-5e8efbec7405,DISK], DatanodeInfoWithStorage[127.0.0.1:39279,DS-58b1bf7e-5c6e-4739-b326-ae88a0799027,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33061,DS-3e53e221-56b1-47c9-a3fe-5e8efbec7405,DISK]) is bad. 2024-12-08T00:49:15,259 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_389193149_22 at /127.0.0.1:42382 [Receiving block BP-2043044047-172.17.0.2-1733618934874:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:39279:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42382 dst: /127.0.0.1:39279 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:49:15,259 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_389193149_22 at /127.0.0.1:35528 [Receiving block BP-2043044047-172.17.0.2-1733618934874:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:33061:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35528 dst: /127.0.0.1:33061 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:49:15,310 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@89d2ae2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:49:15,311 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ad33209{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:49:15,311 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:49:15,312 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5cc90b47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:49:15,312 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@431ff48f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/hadoop.log.dir/,STOPPED} 2024-12-08T00:49:15,315 WARN [BP-2043044047-172.17.0.2-1733618934874 heartbeating to localhost/127.0.0.1:37255 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:49:15,315 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-08T00:49:15,316 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:49:15,316 WARN [BP-2043044047-172.17.0.2-1733618934874 heartbeating to localhost/127.0.0.1:37255 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2043044047-172.17.0.2-1733618934874 (Datanode Uuid 79655df2-a483-4591-9bcf-bcb8d8bbb0fa) service to localhost/127.0.0.1:37255 2024-12-08T00:49:15,317 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/cluster_c1909289-147f-3345-59fd-781f6482af86/data/data1/current/BP-2043044047-172.17.0.2-1733618934874 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:49:15,318 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/cluster_c1909289-147f-3345-59fd-781f6482af86/data/data2/current/BP-2043044047-172.17.0.2-1733618934874 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:49:15,318 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:49:15,325 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:49:15,327 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:49:15,328 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:49:15,328 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:49:15,328 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T00:49:15,328 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54680a3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:49:15,328 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47645a2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:49:15,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:49:15,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:15,418 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@51f33716{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/java.io.tmpdir/jetty-localhost-34097-hadoop-hdfs-3_4_1-tests_jar-_-any-13872122015321116923/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:49:15,419 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@75d1d597{HTTP/1.1, (http/1.1)}{localhost:34097} 2024-12-08T00:49:15,419 INFO [Time-limited test {}] server.Server(415): Started @174849ms 2024-12-08T00:49:15,420 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-08T00:49:15,435 WARN [ResponseProcessor for block BP-2043044047-172.17.0.2-1733618934874:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2043044047-172.17.0.2-1733618934874:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:49:15,436 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_389193149_22 at /127.0.0.1:42388 [Receiving block BP-2043044047-172.17.0.2-1733618934874:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:39279:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42388 dst: /127.0.0.1:39279 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:49:15,443 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5fb420d9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:49:15,443 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@140d49ad{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:49:15,443 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:49:15,443 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c15ccec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:49:15,443 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@920ba2b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/hadoop.log.dir/,STOPPED} 2024-12-08T00:49:15,444 WARN [BP-2043044047-172.17.0.2-1733618934874 heartbeating to localhost/127.0.0.1:37255 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:49:15,444 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-08T00:49:15,444 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:49:15,444 WARN [BP-2043044047-172.17.0.2-1733618934874 heartbeating to localhost/127.0.0.1:37255 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2043044047-172.17.0.2-1733618934874 (Datanode Uuid 5584fa3b-e14c-4e79-ba2e-d4b3219a978d) service to localhost/127.0.0.1:37255 2024-12-08T00:49:15,445 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/cluster_c1909289-147f-3345-59fd-781f6482af86/data/data3/current/BP-2043044047-172.17.0.2-1733618934874 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:49:15,445 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/cluster_c1909289-147f-3345-59fd-781f6482af86/data/data4/current/BP-2043044047-172.17.0.2-1733618934874 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:49:15,445 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:49:15,456 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:49:15,459 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:49:15,460 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:49:15,460 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:49:15,460 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T00:49:15,460 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@34137c70{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:49:15,461 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17233de7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:49:15,556 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4a5bd99d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/java.io.tmpdir/jetty-localhost-35481-hadoop-hdfs-3_4_1-tests_jar-_-any-14106257240691221693/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:49:15,556 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@77746ad2{HTTP/1.1, (http/1.1)}{localhost:35481} 2024-12-08T00:49:15,556 INFO [Time-limited test {}] server.Server(415): Started @174986ms 2024-12-08T00:49:15,557 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T00:49:15,763 WARN [Thread-1403 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T00:49:15,765 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaeb49355b5e705fd with lease ID 0x1d416edf38cf9173: from storage DS-3e53e221-56b1-47c9-a3fe-5e8efbec7405 node DatanodeRegistration(127.0.0.1:40137, datanodeUuid=79655df2-a483-4591-9bcf-bcb8d8bbb0fa, infoPort=33645, infoSecurePort=0, ipcPort=36687, storageInfo=lv=-57;cid=testClusterID;nsid=28432284;c=1733618934874), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:49:15,765 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaeb49355b5e705fd with lease ID 0x1d416edf38cf9173: from storage DS-94ffcfad-c88a-4f09-8130-3e8660bb6f6a node DatanodeRegistration(127.0.0.1:40137, datanodeUuid=79655df2-a483-4591-9bcf-bcb8d8bbb0fa, infoPort=33645, infoSecurePort=0, ipcPort=36687, storageInfo=lv=-57;cid=testClusterID;nsid=28432284;c=1733618934874), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:49:15,891 WARN [Thread-1423 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T00:49:15,893 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x840e2f1886a46527 with lease ID 0x1d416edf38cf9174: from storage DS-58b1bf7e-5c6e-4739-b326-ae88a0799027 node DatanodeRegistration(127.0.0.1:39967, datanodeUuid=5584fa3b-e14c-4e79-ba2e-d4b3219a978d, infoPort=40499, infoSecurePort=0, ipcPort=38713, storageInfo=lv=-57;cid=testClusterID;nsid=28432284;c=1733618934874), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:49:15,894 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x840e2f1886a46527 with lease ID 0x1d416edf38cf9174: from storage DS-54b21332-71bc-4a57-89dc-03ebbff72250 node DatanodeRegistration(127.0.0.1:39967, datanodeUuid=5584fa3b-e14c-4e79-ba2e-d4b3219a978d, infoPort=40499, infoSecurePort=0, ipcPort=38713, storageInfo=lv=-57;cid=testClusterID;nsid=28432284;c=1733618934874), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:49:16,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:16,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:49:16,577 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-12-08T00:49:16,582 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-12-08T00:49:16,585 ERROR [FSHLog-0-hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a-prefix:0f983e3e5be1,46417,1733618936694 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39279,DS-58b1bf7e-5c6e-4739-b326-ae88a0799027,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:49:16,586 WARN [FSHLog-0-hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a-prefix:0f983e3e5be1,46417,1733618936694 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39279,DS-58b1bf7e-5c6e-4739-b326-ae88a0799027,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T00:49:16,586 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0f983e3e5be1%2C46417%2C1733618936694:(num 1733618951243) roll requested 2024-12-08T00:49:16,586 INFO [regionserver/0f983e3e5be1:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C46417%2C1733618936694.1733618956586 2024-12-08T00:49:16,590 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618951243 newFile=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618956586 2024-12-08T00:49:16,590 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:16,591 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:16,591 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:16,591 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:16,591 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:16,591 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618951243 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618956586 2024-12-08T00:49:16,591 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39279,DS-58b1bf7e-5c6e-4739-b326-ae88a0799027,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:49:16,591 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39279,DS-58b1bf7e-5c6e-4739-b326-ae88a0799027,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T00:49:16,591 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618951243 2024-12-08T00:49:16,591 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40499:40499),(127.0.0.1/127.0.0.1:33645:33645)] 2024-12-08T00:49:16,592 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618951243 is not closed yet, will try archiving it next time 2024-12-08T00:49:16,592 WARN [IPC Server handler 1 on default port 37255 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618951243 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-12-08T00:49:16,592 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618951243 after 1ms 2024-12-08T00:49:17,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:17,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:18,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:18,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:18,593 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C46417%2C1733618936694.1733618958593 2024-12-08T00:49:18,604 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618956586 newFile=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618958593 2024-12-08T00:49:18,604 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:18,604 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:18,605 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:18,605 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:18,605 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:18,605 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618956586 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618958593 2024-12-08T00:49:18,606 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33645:33645),(127.0.0.1/127.0.0.1:40499:40499)] 2024-12-08T00:49:18,606 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618951243 is not closed yet, will try archiving it next time 2024-12-08T00:49:18,606 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618956586 is not closed yet, will try archiving it next time 2024-12-08T00:49:18,606 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618937310 2024-12-08T00:49:18,606 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618937310 2024-12-08T00:49:18,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40137 is added to blk_1073741838_1019 (size=1264) 2024-12-08T00:49:18,607 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): 
Recovered lease, attempt=0 on file=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618937310 after 1ms 2024-12-08T00:49:18,607 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618937310 2024-12-08T00:49:18,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39967 is added to blk_1073741838_1019 (size=1264) 2024-12-08T00:49:18,609 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618951243 is not closed yet, will try archiving it next time 2024-12-08T00:49:18,616 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733618938186/Put/vlen=218/seqid=0] 2024-12-08T00:49:18,616 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1733618947910/Put/vlen=1045/seqid=0] 2024-12-08T00:49:18,616 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618937310 2024-12-08T00:49:18,616 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618951243 2024-12-08T00:49:18,616 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618951243 2024-12-08T00:49:18,617 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618951243 after 1ms 2024-12-08T00:49:18,617 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618951243 2024-12-08T00:49:18,620 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1733618951242/Put/vlen=1045/seqid=0] 2024-12-08T00:49:18,620 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1733618953255/Put/vlen=1045/seqid=0] 2024-12-08T00:49:18,620 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618951243 2024-12-08T00:49:18,620 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618956586 2024-12-08T00:49:18,620 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file 
hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618956586 2024-12-08T00:49:18,621 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618956586 after 1ms 2024-12-08T00:49:18,621 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618956586 2024-12-08T00:49:18,623 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1733618956585/Put/vlen=1045/seqid=0] 2024-12-08T00:49:18,623 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618958593 2024-12-08T00:49:18,623 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618958593 2024-12-08T00:49:18,624 WARN [IPC Server handler 2 on default port 37255 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618958593 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-12-08T00:49:18,624 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618958593 after 0ms 2024-12-08T00:49:18,901 WARN [ResponseProcessor for block BP-2043044047-172.17.0.2-1733618934874:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2043044047-172.17.0.2-1733618934874:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:49:18,901 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1513898204_22 at /127.0.0.1:49044 [Receiving block BP-2043044047-172.17.0.2-1733618934874:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:40137:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49044 dst: /127.0.0.1:40137 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:40137 remote=/127.0.0.1:49044]. Total timeout mills is 60000, 59703 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:49:18,901 WARN [DataStreamer for file /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618958593 block BP-2043044047-172.17.0.2-1733618934874:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2043044047-172.17.0.2-1733618934874:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40137,DS-3e53e221-56b1-47c9-a3fe-5e8efbec7405,DISK], DatanodeInfoWithStorage[127.0.0.1:39967,DS-58b1bf7e-5c6e-4739-b326-ae88a0799027,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40137,DS-3e53e221-56b1-47c9-a3fe-5e8efbec7405,DISK]) is bad. 2024-12-08T00:49:18,901 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1513898204_22 at /127.0.0.1:43408 [Receiving block BP-2043044047-172.17.0.2-1733618934874:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:39967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43408 dst: /127.0.0.1:39967 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:49:18,905 WARN [DataStreamer for file /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618958593 block BP-2043044047-172.17.0.2-1733618934874:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2043044047-172.17.0.2-1733618934874:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:49:18,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40137 is added to blk_1073741839_1022 (size=85) 2024-12-08T00:49:19,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:19,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:19,767 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
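The Close-WAL-Writer warnings above come from RecoverLeaseFSUtils polling the NameNode until it reports the old WAL file closed; once the underlying DFSClient has been shut down, each poll fails with "Filesystem closed" and is retried about once a second. A minimal sketch of that polling pattern, using only the public DistributedFileSystem recoverLease/isFileClosed calls visible in the stack traces (the wrapper class and the 1-second retry interval are illustrative assumptions, not the HBase implementation):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoveryPollSketch {
  /**
   * Poll until the NameNode releases the lease on a WAL file or the deadline passes.
   * Mirrors the attempt=0, attempt=1, ... progression seen in the log above.
   */
  public static boolean recoverLease(DistributedFileSystem dfs, Path walFile, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    boolean recovered = dfs.recoverLease(walFile);     // attempt=0; often succeeds within a few ms
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000L);                             // illustrative pause between attempts
      if (dfs.isFileClosed(walFile)) {                 // the call invoked reflectively in the traces above
        return true;                                   // a closed file means the lease has been released
      }
      recovered = dfs.recoverLease(walFile);           // re-issue recovery (attempt=1, 2, ...)
    }
    return recovered;
  }
}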
2024-12-08T00:49:20,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:20,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:20,593 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618951243 after 4002ms 2024-12-08T00:49:21,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:21,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:22,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:22,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:22,626 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618958593 after 4002ms 2024-12-08T00:49:22,626 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618958593 2024-12-08T00:49:22,635 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618958593 2024-12-08T00:49:22,636 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-12-08T00:49:22,636 ERROR [FSHLog-0-hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a-prefix:0f983e3e5be1,46417,1733618936694.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33349,DS-3e53e221-56b1-47c9-a3fe-5e8efbec7405,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:49:22,636 WARN [FSHLog-0-hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a-prefix:0f983e3e5be1,46417,1733618936694.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33349,DS-3e53e221-56b1-47c9-a3fe-5e8efbec7405,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
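The "All datanodes ... are bad. Aborting..." failure above is raised by DataStreamer when it cannot rebuild the write pipeline after the datanode restart. Client-side behaviour in that situation is governed by the standard HDFS replace-datanode-on-failure settings; a hedged sketch of a client Configuration follows (the values are illustrative, not the ones this test run uses):

import org.apache.hadoop.conf.Configuration;

public final class PipelineFailurePolicySketch {
  /** Client-side HDFS settings that shape how DataStreamer reacts to a failed pipeline datanode. */
  public static Configuration clientConf() {
    Configuration conf = new Configuration();
    // Allow the client to ask the NameNode for a replacement datanode on pipeline failure.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // DEFAULT replaces a datanode only when replication and pipeline size make it worthwhile.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // Continue with the surviving datanodes if no replacement is available, rather than
    // failing the stream outright with "All datanodes ... are bad".
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}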
2024-12-08T00:49:22,636 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0f983e3e5be1%2C46417%2C1733618936694.meta:.meta(num 1733618937679) roll requested 2024-12-08T00:49:22,636 INFO [regionserver/0f983e3e5be1:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C46417%2C1733618936694.meta.1733618962636.meta 2024-12-08T00:49:22,641 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:22,641 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:22,642 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:22,642 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:22,642 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:22,642 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.meta.1733618937679.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.meta.1733618962636.meta 2024-12-08T00:49:22,642 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33349,DS-3e53e221-56b1-47c9-a3fe-5e8efbec7405,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:49:22,642 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33349,DS-3e53e221-56b1-47c9-a3fe-5e8efbec7405,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
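The log roller above closes the failed meta WAL writer and opens a new one. The same roll can also be requested explicitly through the HBase Admin API; a short sketch assuming a reachable cluster and the host,port,startcode server name format shown in the log (the connection setup is the stock client API, not code from this test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class WalRollSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Server name format matches the log: host,port,startcode.
      ServerName server = ServerName.valueOf("0f983e3e5be1,46417,1733618936694");
      // Ask the region server to close its current WAL writer and open a new one,
      // which produces the "Rolled WAL ... new WAL ..." lines seen above.
      admin.rollWALWriter(server);
    }
  }
}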
2024-12-08T00:49:22,642 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.meta.1733618937679.meta 2024-12-08T00:49:22,642 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40499:40499),(127.0.0.1/127.0.0.1:33645:33645)] 2024-12-08T00:49:22,643 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.meta.1733618937679.meta is not closed yet, will try archiving it next time 2024-12-08T00:49:22,643 WARN [IPC Server handler 3 on default port 37255 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.meta.1733618937679.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1013 2024-12-08T00:49:22,643 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.meta.1733618937679.meta after 1ms 2024-12-08T00:49:22,655 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/hbase/meta/1588230740/.tmp/info/26e10ab65527421d8daeeaf6e803b5fe is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1733618937831.ffc8037c0d95653204c1d5fe84f159d2./info:regioninfo/1733618938191/Put/seqid=0 2024-12-08T00:49:22,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39967 is added to blk_1073741841_1025 (size=7125) 2024-12-08T00:49:22,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40137 is added to blk_1073741841_1025 (size=7125) 2024-12-08T00:49:22,660 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/hbase/meta/1588230740/.tmp/info/26e10ab65527421d8daeeaf6e803b5fe 2024-12-08T00:49:22,677 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/hbase/meta/1588230740/.tmp/ns/df93f8b9d2de44f2954b9a66ff2ee7e8 is 43, key is default/ns:d/1733618937762/Put/seqid=0 2024-12-08T00:49:22,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40137 is added to blk_1073741842_1026 (size=5153) 2024-12-08T00:49:22,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39967 is added to blk_1073741842_1026 (size=5153) 2024-12-08T00:49:22,682 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/hbase/meta/1588230740/.tmp/ns/df93f8b9d2de44f2954b9a66ff2ee7e8 2024-12-08T00:49:22,699 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/hbase/meta/1588230740/.tmp/table/5ca53e3877ab48c0a0f3e114b5dfcbdd is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1733618938202/Put/seqid=0 2024-12-08T00:49:22,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40137 is added to blk_1073741843_1027 (size=5438) 2024-12-08T00:49:22,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39967 is added to blk_1073741843_1027 (size=5438) 2024-12-08T00:49:22,704 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/hbase/meta/1588230740/.tmp/table/5ca53e3877ab48c0a0f3e114b5dfcbdd 2024-12-08T00:49:22,708 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/hbase/meta/1588230740/.tmp/info/26e10ab65527421d8daeeaf6e803b5fe as hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/hbase/meta/1588230740/info/26e10ab65527421d8daeeaf6e803b5fe 2024-12-08T00:49:22,713 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/hbase/meta/1588230740/info/26e10ab65527421d8daeeaf6e803b5fe, entries=10, sequenceid=11, filesize=7.0 K 2024-12-08T00:49:22,714 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/hbase/meta/1588230740/.tmp/ns/df93f8b9d2de44f2954b9a66ff2ee7e8 as hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/hbase/meta/1588230740/ns/df93f8b9d2de44f2954b9a66ff2ee7e8 2024-12-08T00:49:22,719 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/hbase/meta/1588230740/ns/df93f8b9d2de44f2954b9a66ff2ee7e8, entries=2, sequenceid=11, filesize=5.0 K 2024-12-08T00:49:22,720 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/hbase/meta/1588230740/.tmp/table/5ca53e3877ab48c0a0f3e114b5dfcbdd as hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/hbase/meta/1588230740/table/5ca53e3877ab48c0a0f3e114b5dfcbdd 2024-12-08T00:49:22,725 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/hbase/meta/1588230740/table/5ca53e3877ab48c0a0f3e114b5dfcbdd, entries=2, sequenceid=11, filesize=5.3 K 2024-12-08T00:49:22,726 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 90ms, sequenceid=11, compaction requested=false 2024-12-08T00:49:22,727 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-08T00:49:22,727 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing ffc8037c0d95653204c1d5fe84f159d2 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-08T00:49:22,727 ERROR [FSHLog-0-hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a-prefix:0f983e3e5be1,46417,1733618936694 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2043044047-172.17.0.2-1733618934874:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:49:22,727 WARN [FSHLog-0-hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a-prefix:0f983e3e5be1,46417,1733618936694 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2043044047-172.17.0.2-1733618934874:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
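[Editor's note] The traces above show the same three frames (Method.invoke, HFileSystem$1.invoke, $ProxyNN.updateBlockForPipeline) repeated several times. That pattern comes from the NameNode client protocol being wrapped in layered JDK dynamic proxies. The following is a minimal, self-contained sketch, not HBase's actual code: the ClientProtocol interface and wrap() handler here are hypothetical stand-ins that reproduce the same repeating frame shape when a proxy is nested inside another proxy.

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Proxy;

// Minimal sketch: wrapping a target in several layers of JDK dynamic proxies.
// Each layer contributes "$ProxyNN.method -> Method.invoke -> handler.invoke"
// to the stack, which is why the traces above repeat the same frames.
public final class NestedProxyDemo {

  // Hypothetical stand-in for the RPC interface being proxied.
  interface ClientProtocol {
    String updateBlockForPipeline(String block);
  }

  // A pass-through handler, loosely analogous to the anonymous
  // InvocationHandler that appears as HFileSystem$1 in the traces.
  static ClientProtocol wrap(ClientProtocol delegate) {
    InvocationHandler handler = (proxy, method, args) -> method.invoke(delegate, args);
    return (ClientProtocol) Proxy.newProxyInstance(
        ClientProtocol.class.getClassLoader(),
        new Class<?>[] {ClientProtocol.class},
        handler);
  }

  public static void main(String[] args) {
    ClientProtocol real = block -> {
      // Print the current stack to show the repeated proxy frames.
      new Exception("stack for " + block).printStackTrace();
      return block;
    };

    // Wrap the same target several times; every extra layer adds another
    // Method.invoke / handler.invoke / $Proxy group of frames.
    ClientProtocol proxied = real;
    for (int i = 0; i < 3; i++) {
      proxied = wrap(proxied);
    }
    proxied.updateBlockForPipeline("blk_example");
  }
}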
2024-12-08T00:49:22,728 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0f983e3e5be1%2C46417%2C1733618936694:(num 1733618958593) roll requested 2024-12-08T00:49:22,728 INFO [regionserver/0f983e3e5be1:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C46417%2C1733618936694.1733618962728 2024-12-08T00:49:22,732 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618958593 newFile=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618962728 2024-12-08T00:49:22,732 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:22,733 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:22,733 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:22,733 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:22,733 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:22,733 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618958593 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618962728 2024-12-08T00:49:22,733 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2043044047-172.17.0.2-1733618934874:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:49:22,733 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33645:33645),(127.0.0.1/127.0.0.1:40499:40499)] 2024-12-08T00:49:22,733 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2043044047-172.17.0.2-1733618934874:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:49:22,734 DEBUG [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618958593 is not closed yet, will try archiving it next time 2024-12-08T00:49:22,734 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618958593 2024-12-08T00:49:22,734 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618958593 after 0ms 2024-12-08T00:49:22,734 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.1733618958593 to hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/oldWALs/0f983e3e5be1%2C46417%2C1733618936694.1733618958593 2024-12-08T00:49:22,746 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/default/TestLogRolling-testLogRollOnPipelineRestart/ffc8037c0d95653204c1d5fe84f159d2/.tmp/info/a0dd2ed8511f4eefa84fd79db02aa740 is 1080, key is row1002/info:/1733618947910/Put/seqid=0 2024-12-08T00:49:22,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39967 is added to blk_1073741845_1029 (size=9270) 2024-12-08T00:49:22,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40137 is added to blk_1073741845_1029 (size=9270) 2024-12-08T00:49:22,750 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), 
to=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/default/TestLogRolling-testLogRollOnPipelineRestart/ffc8037c0d95653204c1d5fe84f159d2/.tmp/info/a0dd2ed8511f4eefa84fd79db02aa740 2024-12-08T00:49:22,757 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/default/TestLogRolling-testLogRollOnPipelineRestart/ffc8037c0d95653204c1d5fe84f159d2/.tmp/info/a0dd2ed8511f4eefa84fd79db02aa740 as hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/default/TestLogRolling-testLogRollOnPipelineRestart/ffc8037c0d95653204c1d5fe84f159d2/info/a0dd2ed8511f4eefa84fd79db02aa740 2024-12-08T00:49:22,761 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/default/TestLogRolling-testLogRollOnPipelineRestart/ffc8037c0d95653204c1d5fe84f159d2/info/a0dd2ed8511f4eefa84fd79db02aa740, entries=4, sequenceid=8, filesize=9.1 K 2024-12-08T00:49:22,762 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for ffc8037c0d95653204c1d5fe84f159d2 in 35ms, sequenceid=8, compaction requested=false 2024-12-08T00:49:22,762 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for ffc8037c0d95653204c1d5fe84f159d2: 2024-12-08T00:49:22,768 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T00:49:22,768 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T00:49:22,768 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) 
at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:49:22,768 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:49:22,768 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:49:22,768 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-08T00:49:22,769 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T00:49:22,769 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=897892332, stopped=false 2024-12-08T00:49:22,769 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0f983e3e5be1,40363,1733618936495 2024-12-08T00:49:22,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46417-0x10002f3693d0001, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T00:49:22,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T00:49:22,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46417-0x10002f3693d0001, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:49:22,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:49:22,829 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T00:49:22,829 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
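[Editor's note] The "Connection has been closed by ..." INFO lines and the accompanying DEBUG call-stack dumps are emitted when AsyncConnectionImpl.close() runs during minicluster teardown. For reference, a hedged sketch of how client code obtains and closes an async connection through the public HBase client API; the quorum value is a placeholder, and the print statement is only a marker.

import java.io.IOException;
import java.util.concurrent.ExecutionException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch only: closing an AsyncConnection is what triggers the
// "Connection has been closed by <caller>" line plus the DEBUG dump of
// whoever invoked close() (here, try-with-resources at end of main).
public final class AsyncConnectionCloseExample {
  public static void main(String[] args)
      throws IOException, ExecutionException, InterruptedException {
    Configuration conf = HBaseConfiguration.create();
    // Placeholder quorum; in the test log this points at the minicluster.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");

    // try-with-resources closes the connection, analogous to what
    // HBaseTestingUtil.closeConnection() does during shutdownMiniCluster().
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      System.out.println("async connection established: " + (conn != null));
    }
  }
}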
2024-12-08T00:49:22,830 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:49:22,830 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:49:22,830 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:49:22,830 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46417-0x10002f3693d0001, quorum=127.0.0.1:55980, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:49:22,830 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0f983e3e5be1,46417,1733618936694' ***** 2024-12-08T00:49:22,830 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T00:49:22,831 INFO [RS:0;0f983e3e5be1:46417 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T00:49:22,832 INFO [RS:0;0f983e3e5be1:46417 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T00:49:22,832 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T00:49:22,832 INFO [RS:0;0f983e3e5be1:46417 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T00:49:22,832 INFO [RS:0;0f983e3e5be1:46417 {}] regionserver.HRegionServer(3091): Received CLOSE for ffc8037c0d95653204c1d5fe84f159d2 2024-12-08T00:49:22,832 INFO [RS:0;0f983e3e5be1:46417 {}] regionserver.HRegionServer(959): stopping server 0f983e3e5be1,46417,1733618936694 2024-12-08T00:49:22,833 INFO [RS:0;0f983e3e5be1:46417 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T00:49:22,833 INFO [RS:0;0f983e3e5be1:46417 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0f983e3e5be1:46417. 2024-12-08T00:49:22,833 DEBUG [RS:0;0f983e3e5be1:46417 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:49:22,833 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing ffc8037c0d95653204c1d5fe84f159d2, disabling compactions & flushes 2024-12-08T00:49:22,833 DEBUG [RS:0;0f983e3e5be1:46417 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:49:22,833 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733618937831.ffc8037c0d95653204c1d5fe84f159d2. 2024-12-08T00:49:22,833 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733618937831.ffc8037c0d95653204c1d5fe84f159d2. 2024-12-08T00:49:22,833 INFO [RS:0;0f983e3e5be1:46417 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T00:49:22,833 INFO [RS:0;0f983e3e5be1:46417 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T00:49:22,833 INFO [RS:0;0f983e3e5be1:46417 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T00:49:22,833 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733618937831.ffc8037c0d95653204c1d5fe84f159d2. after waiting 0 ms 2024-12-08T00:49:22,833 INFO [RS:0;0f983e3e5be1:46417 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T00:49:22,833 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733618937831.ffc8037c0d95653204c1d5fe84f159d2. 2024-12-08T00:49:22,834 INFO [RS:0;0f983e3e5be1:46417 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-08T00:49:22,834 DEBUG [RS:0;0f983e3e5be1:46417 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, ffc8037c0d95653204c1d5fe84f159d2=TestLogRolling-testLogRollOnPipelineRestart,,1733618937831.ffc8037c0d95653204c1d5fe84f159d2.} 2024-12-08T00:49:22,834 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T00:49:22,834 DEBUG [RS:0;0f983e3e5be1:46417 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, ffc8037c0d95653204c1d5fe84f159d2 2024-12-08T00:49:22,834 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T00:49:22,834 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T00:49:22,834 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T00:49:22,834 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T00:49:22,839 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/default/TestLogRolling-testLogRollOnPipelineRestart/ffc8037c0d95653204c1d5fe84f159d2/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-12-08T00:49:22,839 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-08T00:49:22,840 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733618937831.ffc8037c0d95653204c1d5fe84f159d2. 2024-12-08T00:49:22,840 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T00:49:22,840 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for ffc8037c0d95653204c1d5fe84f159d2: Waiting for close lock at 1733618962833Running coprocessor pre-close hooks at 1733618962833Disabling compacts and flushes for region at 1733618962833Disabling writes for close at 1733618962833Writing region close event to WAL at 1733618962835 (+2 ms)Running coprocessor post-close hooks at 1733618962840 (+5 ms)Closed at 1733618962840 2024-12-08T00:49:22,840 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T00:49:22,840 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733618937831.ffc8037c0d95653204c1d5fe84f159d2. 2024-12-08T00:49:22,840 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733618962834Running coprocessor pre-close hooks at 1733618962834Disabling compacts and flushes for region at 1733618962834Disabling writes for close at 1733618962834Writing region close event to WAL at 1733618962837 (+3 ms)Running coprocessor post-close hooks at 1733618962840 (+3 ms)Closed at 1733618962840 2024-12-08T00:49:22,840 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T00:49:23,034 INFO [RS:0;0f983e3e5be1:46417 {}] regionserver.HRegionServer(976): stopping server 0f983e3e5be1,46417,1733618936694; all regions closed. 
2024-12-08T00:49:23,036 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:23,036 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:23,036 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:23,037 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:23,037 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:23,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40137 is added to blk_1073741840_1023 (size=825) 2024-12-08T00:49:23,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39967 is added to blk_1073741840_1023 (size=825) 2024-12-08T00:49:23,182 INFO [regionserver/0f983e3e5be1:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:49:23,183 INFO [regionserver/0f983e3e5be1:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-08T00:49:23,183 INFO [regionserver/0f983e3e5be1:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-08T00:49:23,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:49:23,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:24,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:24,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:49:25,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:25,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:25,894 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
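[Editor's note] The repeated "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings come from RecoverLeaseFSUtils polling isFileClosed on a DFSClient that has already been shut down. A minimal sketch of the underlying recoverLease/isFileClosed loop against the public DistributedFileSystem API follows; the NameNode URI, WAL path, and timeout are placeholders, and the loop only works while the FileSystem handle is still open.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch of the recoverLease/isFileClosed polling that
// RecoverLeaseFSUtils.recoverFileLease performs per the stack traces above.
// If the FileSystem has already been closed, both calls fail with
// "java.io.IOException: Filesystem closed", matching the repeated warnings.
public final class LeaseRecoverySketch {

  static boolean recoverLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    // First ask the NameNode to start lease recovery on the WAL file.
    boolean recovered = dfs.recoverLease(wal);
    // Then poll until the file is reported closed or the deadline passes.
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000L);
      recovered = dfs.isFileClosed(wal);
    }
    return recovered;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder namenode URI and WAL path; the log uses localhost:34853.
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf)) {
      Path wal = new Path("/user/jenkins/WALs/example-wal");
      boolean ok = recoverLease((DistributedFileSystem) fs, wal, 60_000L);
      System.out.println("lease recovered: " + ok);
    }
    // Calling recoverLease()/isFileClosed() after fs.close() would instead
    // throw IOException("Filesystem closed"), as seen above.
  }
}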
2024-12-08T00:49:25,911 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T00:49:25,912 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-08T00:49:25,912 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-08T00:49:26,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:26,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:26,476 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
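The repeated WARN above is produced by a reflective probe: the stack trace shows Method.invoke leading into DFSClient.isFileClosed, so once the DFSClient has already been shut down the underlying "Filesystem closed" IOException surfaces wrapped in an InvocationTargetException. A minimal, self-contained sketch of that reflective pattern (not the HBase implementation; the class name and logging below are illustrative) looks like this:

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/**
 * Sketch of a reflective isFileClosed(Path) probe like the one the stack
 * traces above go through. Reflection is used because isFileClosed exists on
 * DistributedFileSystem but not on the FileSystem base class.
 */
public final class IsFileClosedProbe {

  private IsFileClosedProbe() {
  }

  /**
   * Returns true if the filesystem reports the file as closed, false if the
   * probe is unavailable or the call failed (for example because the
   * DFSClient was already closed, which is exactly the
   * "Filesystem closed" cause wrapped in InvocationTargetException above).
   */
  public static boolean isFileClosed(FileSystem fs, Path path) {
    Method probe;
    try {
      probe = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // this filesystem implementation has no such probe
    }
    try {
      return (Boolean) probe.invoke(fs, path);
    } catch (InvocationTargetException e) {
      // The real cause is the wrapped target exception, which is why the log
      // shows both the InvocationTargetException and the IOException layers.
      System.err.println("isFileClosed probe failed: " + e.getCause());
      return false;
    } catch (IllegalAccessException e) {
      return false;
    }
  }
}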
2024-12-08T00:49:26,644 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.meta.1733618937679.meta after 4002ms
2024-12-08T00:49:26,646 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/WALs/0f983e3e5be1,46417,1733618936694/0f983e3e5be1%2C46417%2C1733618936694.meta.1733618937679.meta to hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/oldWALs/0f983e3e5be1%2C46417%2C1733618936694.meta.1733618937679.meta
2024-12-08T00:49:26,653 DEBUG [RS:0;0f983e3e5be1:46417 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/oldWALs
2024-12-08T00:49:26,654 INFO [RS:0;0f983e3e5be1:46417 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0f983e3e5be1%2C46417%2C1733618936694.meta:.meta(num 1733618962636)
2024-12-08T00:49:26,654 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-08T00:49:26,654 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-08T00:49:26,655 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-08T00:49:26,655 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-08T00:49:26,655 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-08T00:49:26,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39967 is added to blk_1073741844_1028 (size=1162)
2024-12-08T00:49:26,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40137 is added to blk_1073741844_1028 (size=1162)
2024-12-08T00:49:26,662 DEBUG [RS:0;0f983e3e5be1:46417 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/oldWALs
2024-12-08T00:49:26,662 INFO [RS:0;0f983e3e5be1:46417 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0f983e3e5be1%2C46417%2C1733618936694:(num 1733618962728)
2024-12-08T00:49:26,662 DEBUG [RS:0;0f983e3e5be1:46417 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T00:49:26,662 INFO [RS:0;0f983e3e5be1:46417 {}] regionserver.LeaseManager(133): Closed leases
2024-12-08T00:49:26,662 INFO [RS:0;0f983e3e5be1:46417 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-08T00:49:26,663 INFO [RS:0;0f983e3e5be1:46417 {}] hbase.ChoreService(370): Chore service for: regionserver/0f983e3e5be1:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-08T00:49:26,663 INFO [RS:0;0f983e3e5be1:46417 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-08T00:49:26,663 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-08T00:49:26,663 INFO [RS:0;0f983e3e5be1:46417 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46417
2024-12-08T00:49:26,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-08T00:49:26,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46417-0x10002f3693d0001, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0f983e3e5be1,46417,1733618936694
2024-12-08T00:49:26,727 INFO [RS:0;0f983e3e5be1:46417 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-08T00:49:26,736 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0f983e3e5be1,46417,1733618936694]
2024-12-08T00:49:26,744 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0f983e3e5be1,46417,1733618936694 already deleted, retry=false
2024-12-08T00:49:26,744 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0f983e3e5be1,46417,1733618936694 expired; onlineServers=0
2024-12-08T00:49:26,744 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0f983e3e5be1,40363,1733618936495' *****
2024-12-08T00:49:26,744 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-08T00:49:26,744 INFO [M:0;0f983e3e5be1:40363 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-08T00:49:26,745 INFO [M:0;0f983e3e5be1:40363 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-08T00:49:26,745 DEBUG [M:0;0f983e3e5be1:40363 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-08T00:49:26,745 DEBUG [M:0;0f983e3e5be1:40363 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-08T00:49:26,745 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733618937022 {}] cleaner.HFileCleaner(306): Exit Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733618937022,5,FailOnTimeoutGroup]
2024-12-08T00:49:26,745 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733618937022 {}] cleaner.HFileCleaner(306): Exit Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733618937022,5,FailOnTimeoutGroup]
2024-12-08T00:49:26,745 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
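The sequence just above, a NodeDeleted event for /hbase/rs/0f983e3e5be1,46417,1733618936694 followed by "RegionServer ephemeral node deleted, processing expiration", is the usual ephemeral-znode liveness pattern. A stand-alone sketch using the plain ZooKeeper client is shown below; the quorum string and znode path are taken from the log, everything else (class name, timeouts, the latch) is illustrative and not HBase's RegionServerTracker.

import java.io.IOException;
import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

/** Watch a regionserver's ephemeral znode and react when it disappears. */
public class EphemeralNodeWatch {

  public static void main(String[] args)
      throws IOException, KeeperException, InterruptedException {
    String quorum = "127.0.0.1:55980";                              // from the log
    String rsZnode = "/hbase/rs/0f983e3e5be1,46417,1733618936694";  // from the log
    CountDownLatch deleted = new CountDownLatch(1);

    ZooKeeper zk = new ZooKeeper(quorum, 30_000, event -> { /* connection events */ });

    // One-shot watch: fires when the ephemeral node goes away (server stopped
    // or its session closed), which is what drives the expiration processing
    // logged above.
    Watcher deletionWatcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && rsZnode.equals(event.getPath())) {
        System.out.println("RegionServer znode deleted: " + event.getPath());
        deleted.countDown();
      }
    };

    if (zk.exists(rsZnode, deletionWatcher) == null) {
      System.out.println("Node already gone: " + rsZnode);
      deleted.countDown();
    }

    deleted.await();
    zk.close();
  }
}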
2024-12-08T00:49:26,746 INFO [M:0;0f983e3e5be1:40363 {}] hbase.ChoreService(370): Chore service for: master/0f983e3e5be1:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T00:49:26,746 INFO [M:0;0f983e3e5be1:40363 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T00:49:26,746 DEBUG [M:0;0f983e3e5be1:40363 {}] master.HMaster(1795): Stopping service threads 2024-12-08T00:49:26,746 INFO [M:0;0f983e3e5be1:40363 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T00:49:26,747 INFO [M:0;0f983e3e5be1:40363 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T00:49:26,747 INFO [M:0;0f983e3e5be1:40363 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T00:49:26,747 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T00:49:26,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T00:49:26,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:49:26,753 DEBUG [M:0;0f983e3e5be1:40363 {}] zookeeper.ZKUtil(347): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T00:49:26,753 WARN [M:0;0f983e3e5be1:40363 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T00:49:26,753 INFO [M:0;0f983e3e5be1:40363 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/.lastflushedseqids 2024-12-08T00:49:26,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39967 is added to blk_1073741846_1030 (size=120) 2024-12-08T00:49:26,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40137 is added to blk_1073741846_1030 (size=120) 2024-12-08T00:49:26,761 INFO [M:0;0f983e3e5be1:40363 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T00:49:26,761 INFO [M:0;0f983e3e5be1:40363 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T00:49:26,761 DEBUG [M:0;0f983e3e5be1:40363 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T00:49:26,761 INFO [M:0;0f983e3e5be1:40363 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:49:26,761 DEBUG [M:0;0f983e3e5be1:40363 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:49:26,761 DEBUG [M:0;0f983e3e5be1:40363 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-08T00:49:26,761 DEBUG [M:0;0f983e3e5be1:40363 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:49:26,761 INFO [M:0;0f983e3e5be1:40363 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-12-08T00:49:26,762 ERROR [FSHLog-0-hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData-prefix:0f983e3e5be1,40363,1733618936495 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33349,DS-3e53e221-56b1-47c9-a3fe-5e8efbec7405,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:49:26,762 WARN [FSHLog-0-hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData-prefix:0f983e3e5be1,40363,1733618936495 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33349,DS-3e53e221-56b1-47c9-a3fe-5e8efbec7405,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T00:49:26,762 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 0f983e3e5be1%2C40363%2C1733618936495:(num 1733618936831) roll requested 2024-12-08T00:49:26,762 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C40363%2C1733618936495.1733618966762 2024-12-08T00:49:26,768 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:26,768 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:26,769 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:26,769 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:26,769 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:26,769 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/WALs/0f983e3e5be1,40363,1733618936495/0f983e3e5be1%2C40363%2C1733618936495.1733618936831 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/WALs/0f983e3e5be1,40363,1733618936495/0f983e3e5be1%2C40363%2C1733618936495.1733618966762 2024-12-08T00:49:26,769 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33349,DS-3e53e221-56b1-47c9-a3fe-5e8efbec7405,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T00:49:26,770 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33349,DS-3e53e221-56b1-47c9-a3fe-5e8efbec7405,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
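The roll above follows a common shape: an append/sync failure ("All datanodes ... are bad") makes the current writer unusable, a roll is requested, writes continue on a fresh file, and the broken writer is closed best-effort afterwards, which is where the "Failed to write trailer" and "close old writer failed" warnings come from. The following is a simplified, hypothetical sketch of that pattern only; the WalWriter interface and all names are illustrative and not the HBase API.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

/** Roll-on-failure sketch: swap writers when append/sync throws. */
public class RollOnFailureWal {

  /** Minimal writer abstraction for the sketch. */
  public interface WalWriter {
    void append(byte[] entry) throws IOException;
    void sync() throws IOException;
    void close() throws IOException;
  }

  public interface WriterFactory {
    WalWriter create(long rollSequence) throws IOException;
  }

  private final WriterFactory factory;
  private final AtomicLong rollSequence = new AtomicLong();
  private WalWriter current;

  public RollOnFailureWal(WriterFactory factory) throws IOException {
    this.factory = factory;
    this.current = factory.create(rollSequence.incrementAndGet());
  }

  public synchronized void appendAndSync(byte[] entry) throws IOException {
    try {
      current.append(entry);
      current.sync();
    } catch (IOException e) {
      // Equivalent of "appendAndSync throws IOException" followed by
      // "roll requested" in the log.
      requestRoll(e);
      throw e;
    }
  }

  private void requestRoll(IOException cause) {
    WalWriter broken = current;
    try {
      current = factory.create(rollSequence.incrementAndGet());
    } catch (IOException e) {
      throw new RuntimeException("roll failed after: " + cause, e);
    }
    // Best-effort close of the broken writer in the background, mirroring the
    // "close old writer failed" warning and the lease recovery that follows.
    new Thread(() -> {
      try {
        broken.close();
      } catch (IOException e) {
        System.err.println("close old writer failed: " + e);
      }
    }, "Close-WAL-Writer").start();
  }
}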
2024-12-08T00:49:26,770 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/WALs/0f983e3e5be1,40363,1733618936495/0f983e3e5be1%2C40363%2C1733618936495.1733618936831 2024-12-08T00:49:26,770 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40499:40499),(127.0.0.1/127.0.0.1:33645:33645)] 2024-12-08T00:49:26,770 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/WALs/0f983e3e5be1,40363,1733618936495/0f983e3e5be1%2C40363%2C1733618936495.1733618936831 is not closed yet, will try archiving it next time 2024-12-08T00:49:26,770 WARN [IPC Server handler 0 on default port 37255 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/WALs/0f983e3e5be1,40363,1733618936495/0f983e3e5be1%2C40363%2C1733618936495.1733618936831 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-12-08T00:49:26,770 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/WALs/0f983e3e5be1,40363,1733618936495/0f983e3e5be1%2C40363%2C1733618936495.1733618936831 after 0ms 2024-12-08T00:49:26,788 DEBUG [M:0;0f983e3e5be1:40363 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ecbd83118d1440b4a34a92b5acbb534d is 82, key is hbase:meta,,1/info:regioninfo/1733618937708/Put/seqid=0 2024-12-08T00:49:26,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40137 is added to blk_1073741848_1033 (size=5672) 2024-12-08T00:49:26,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39967 is added to blk_1073741848_1033 (size=5672) 2024-12-08T00:49:26,792 INFO [M:0;0f983e3e5be1:40363 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ecbd83118d1440b4a34a92b5acbb534d 2024-12-08T00:49:26,810 DEBUG [M:0;0f983e3e5be1:40363 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/da2d8c4e446a48cabc10c3f65a7a59f9 is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733618938206/Put/seqid=0 2024-12-08T00:49:26,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40137 is added to blk_1073741849_1034 (size=6118) 2024-12-08T00:49:26,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39967 is added to blk_1073741849_1034 (size=6118) 2024-12-08T00:49:26,815 INFO [M:0;0f983e3e5be1:40363 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/da2d8c4e446a48cabc10c3f65a7a59f9 2024-12-08T00:49:26,836 INFO [RS:0;0f983e3e5be1:46417 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T00:49:26,836 INFO [RS:0;0f983e3e5be1:46417 {}] regionserver.HRegionServer(1031): Exiting; stopping=0f983e3e5be1,46417,1733618936694; zookeeper connection closed. 2024-12-08T00:49:26,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46417-0x10002f3693d0001, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:49:26,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46417-0x10002f3693d0001, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:49:26,837 DEBUG [M:0;0f983e3e5be1:40363 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/92f29d92f3054233b6aad96247de7623 is 69, key is 0f983e3e5be1,46417,1733618936694/rs:state/1733618937161/Put/seqid=0 2024-12-08T00:49:26,863 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5ff76fa3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5ff76fa3 2024-12-08T00:49:26,863 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-08T00:49:26,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40137 is added to blk_1073741850_1035 (size=5156) 2024-12-08T00:49:26,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39967 is added to blk_1073741850_1035 (size=5156) 2024-12-08T00:49:26,868 INFO [M:0;0f983e3e5be1:40363 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/92f29d92f3054233b6aad96247de7623 2024-12-08T00:49:26,885 DEBUG [M:0;0f983e3e5be1:40363 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8a204bca43e44c269f3ff9d8bc090851 is 52, key is load_balancer_on/state:d/1733618937827/Put/seqid=0 2024-12-08T00:49:26,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39967 is added to blk_1073741851_1036 (size=5056) 2024-12-08T00:49:26,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40137 is added to blk_1073741851_1036 (size=5056) 2024-12-08T00:49:26,889 INFO [M:0;0f983e3e5be1:40363 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8a204bca43e44c269f3ff9d8bc090851 2024-12-08T00:49:26,895 DEBUG [M:0;0f983e3e5be1:40363 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ecbd83118d1440b4a34a92b5acbb534d as hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ecbd83118d1440b4a34a92b5acbb534d 2024-12-08T00:49:26,900 INFO [M:0;0f983e3e5be1:40363 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ecbd83118d1440b4a34a92b5acbb534d, entries=8, sequenceid=56, filesize=5.5 K 2024-12-08T00:49:26,900 DEBUG [M:0;0f983e3e5be1:40363 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/da2d8c4e446a48cabc10c3f65a7a59f9 as hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/da2d8c4e446a48cabc10c3f65a7a59f9 2024-12-08T00:49:26,905 INFO [M:0;0f983e3e5be1:40363 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/da2d8c4e446a48cabc10c3f65a7a59f9, entries=6, sequenceid=56, filesize=6.0 K 2024-12-08T00:49:26,906 DEBUG [M:0;0f983e3e5be1:40363 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/92f29d92f3054233b6aad96247de7623 as hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/92f29d92f3054233b6aad96247de7623 2024-12-08T00:49:26,910 INFO [M:0;0f983e3e5be1:40363 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/92f29d92f3054233b6aad96247de7623, entries=1, sequenceid=56, filesize=5.0 K 2024-12-08T00:49:26,911 DEBUG [M:0;0f983e3e5be1:40363 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8a204bca43e44c269f3ff9d8bc090851 as hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8a204bca43e44c269f3ff9d8bc090851 2024-12-08T00:49:26,916 INFO [M:0;0f983e3e5be1:40363 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8a204bca43e44c269f3ff9d8bc090851, entries=1, sequenceid=56, filesize=4.9 K 2024-12-08T00:49:26,917 INFO [M:0;0f983e3e5be1:40363 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 156ms, sequenceid=56, compaction requested=false 2024-12-08T00:49:26,918 INFO [M:0;0f983e3e5be1:40363 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
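The "Committing .../.tmp/... as .../info/..." lines above show the two-step flush commit: the flushed file is first written under the region's .tmp directory and then moved into the store (column family) directory. A small sketch of that final step using the Hadoop FileSystem API follows; the paths are copied from the log purely for illustration, and the helper itself is not HBase's HRegionFileSystem.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Move a completed temp file into its store directory via rename. */
public class CommitFlushedFile {

  public static Path commit(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
    Path dst = new Path(storeDir, tmpFile.getName());
    // A rename is atomic on HDFS, so readers never see a half-written file.
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + dst);
    }
    return dst;
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical region layout mirroring the paths in the log.
    Path regionDir = new Path("/hbase-example/data/master/store/1595e783b53d99cd5eef43b6debb2682");
    Path tmpFile = new Path(regionDir, ".tmp/info/ecbd83118d1440b4a34a92b5acbb534d");
    Path storeDir = new Path(regionDir, "info");
    System.out.println("Committed to " + commit(fs, tmpFile, storeDir));
  }
}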
2024-12-08T00:49:26,918 DEBUG [M:0;0f983e3e5be1:40363 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733618966761Disabling compacts and flushes for region at 1733618966761Disabling writes for close at 1733618966761Obtaining lock to block concurrent updates at 1733618966761Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733618966761Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1733618966762 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733618966771 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733618966771Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733618966787 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733618966787Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733618966796 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733618966809 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733618966809Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733618966819 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733618966836 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733618966836Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733618966872 (+36 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733618966884 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733618966884Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@694029e9: reopening flushed file at 1733618966894 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37429ea4: reopening flushed file at 1733618966900 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2cf01972: reopening flushed file at 1733618966905 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@63babd27: reopening flushed file at 1733618966910 (+5 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 156ms, sequenceid=56, compaction requested=false at 1733618966917 (+7 ms)Writing region close event to WAL at 1733618966918 (+1 ms)Closed at 1733618966918 2024-12-08T00:49:26,918 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:26,918 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:26,918 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:26,918 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:26,919 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:49:26,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40137 is added to blk_1073741847_1031 (size=757) 2024-12-08T00:49:26,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39967 is added to blk_1073741847_1031 (size=757) 2024-12-08T00:49:27,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:27,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:27,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:27,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:27,859 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:27,860 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:27,860 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:27,860 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:27,860 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:27,860 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:27,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:27,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:27,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:27,864 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:27,866 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:27,867 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:28,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:28,370 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T00:49:28,372 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:28,373 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:28,374 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:28,374 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:28,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:28,391 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:28,392 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:28,392 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:28,392 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:28,392 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:28,392 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:28,394 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:28,395 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:28,395 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:28,396 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:28,895 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-08T00:49:29,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:29,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:49:30,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:30,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:30,772 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/WALs/0f983e3e5be1,40363,1733618936495/0f983e3e5be1%2C40363%2C1733618936495.1733618936831 after 4002ms 2024-12-08T00:49:30,773 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/WALs/0f983e3e5be1,40363,1733618936495/0f983e3e5be1%2C40363%2C1733618936495.1733618936831 to hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/oldWALs/0f983e3e5be1%2C40363%2C1733618936495.1733618936831 2024-12-08T00:49:30,778 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/MasterData/oldWALs/0f983e3e5be1%2C40363%2C1733618936495.1733618936831 to hdfs://localhost:37255/user/jenkins/test-data/97a2f35d-5970-6611-f647-6a781dc9935a/oldWALs/0f983e3e5be1%2C40363%2C1733618936495.1733618936831$masterlocalwal$ 2024-12-08T00:49:30,779 INFO [M:0;0f983e3e5be1:40363 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-08T00:49:30,779 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
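The repeated "Failed invocation" WARNs above all bottom out in the same place: RecoverLeaseFSUtils probes DistributedFileSystem#isFileClosed through reflection (Method.invoke in the stack frames), and once the DFSClient behind the filesystem has been shut down the probe surfaces as an InvocationTargetException wrapping IOException("Filesystem closed"). A minimal sketch of that probe pattern, assuming nothing about the real RecoverLeaseFSUtils beyond what the stack frames show (this is not the actual HBase code):

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class IsFileClosedProbe {

      /**
       * Returns true if the filesystem reports the file as closed; false if the
       * probe could not be completed (method absent or invocation failed).
       */
      static boolean isFileClosed(FileSystem fs, Path p) {
        try {
          // isFileClosed(Path) is not on the base FileSystem class, hence the
          // reflective lookup on the concrete (HDFS) filesystem class.
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, p);
        } catch (NoSuchMethodException e) {
          return false; // not an HDFS-style filesystem, nothing to probe
        } catch (IllegalAccessException | InvocationTargetException e) {
          // Matches the WARNs above: getCause() is IOException("Filesystem closed")
          // when the client behind fs has already been shut down.
          return false;
        }
      }
    }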
2024-12-08T00:49:30,779 INFO [M:0;0f983e3e5be1:40363 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40363 2024-12-08T00:49:30,779 INFO [M:0;0f983e3e5be1:40363 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T00:49:30,936 INFO [M:0;0f983e3e5be1:40363 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T00:49:30,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:49:30,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40363-0x10002f3693d0000, quorum=127.0.0.1:55980, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:49:30,973 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4a5bd99d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:49:30,973 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@77746ad2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:49:30,973 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:49:30,973 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17233de7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:49:30,973 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@34137c70{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/hadoop.log.dir/,STOPPED} 2024-12-08T00:49:30,974 WARN [BP-2043044047-172.17.0.2-1733618934874 heartbeating to localhost/127.0.0.1:37255 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:49:30,974 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
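The "Received ZooKeeper Event, type=None, state=Closed, path=null" lines above are session-level notifications: events with no path that describe the connection state rather than any znode. A minimal illustration of how such events reach a watcher, using the plain ZooKeeper client API rather than HBase's ZKWatcher (the class name here is illustrative only):

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;

    class SessionStateLogger implements Watcher {
      @Override
      public void process(WatchedEvent event) {
        if (event.getType() == Watcher.Event.EventType.None) {
          // Session-level notification (SyncConnected, Disconnected, Closed, ...),
          // the same shape as the "type=None, state=Closed, path=null" lines above.
          System.out.println("Received ZooKeeper Event, type=None, state="
              + event.getState() + ", path=" + event.getPath());
        }
      }
    }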
2024-12-08T00:49:30,974 WARN [BP-2043044047-172.17.0.2-1733618934874 heartbeating to localhost/127.0.0.1:37255 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2043044047-172.17.0.2-1733618934874 (Datanode Uuid 5584fa3b-e14c-4e79-ba2e-d4b3219a978d) service to localhost/127.0.0.1:37255 2024-12-08T00:49:30,974 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:49:30,975 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/cluster_c1909289-147f-3345-59fd-781f6482af86/data/data3/current/BP-2043044047-172.17.0.2-1733618934874 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:49:30,975 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/cluster_c1909289-147f-3345-59fd-781f6482af86/data/data4/current/BP-2043044047-172.17.0.2-1733618934874 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:49:30,975 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:49:30,977 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@51f33716{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:49:30,978 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@75d1d597{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:49:30,978 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:49:30,978 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47645a2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:49:30,978 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54680a3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/hadoop.log.dir/,STOPPED} 2024-12-08T00:49:30,979 WARN [BP-2043044047-172.17.0.2-1733618934874 heartbeating to localhost/127.0.0.1:37255 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:49:30,979 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
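The "Thread Interrupted waiting to refresh disk information: sleep interrupted" WARNs come from the per-volume space-usage refresh threads being interrupted during datanode teardown. A hedged sketch of that general pattern, using a hypothetical class rather than Hadoop's CachingGetSpaceUsed internals: a background loop recomputes usage, sleeps between refreshes, and treats an interrupt during the sleep as the shutdown signal.

    class DiskUsageRefresher implements Runnable {
      private final long refreshIntervalMs;
      private volatile long cachedUsedBytes;

      DiskUsageRefresher(long refreshIntervalMs) {
        this.refreshIntervalMs = refreshIntervalMs;
      }

      @Override
      public void run() {
        while (!Thread.currentThread().isInterrupted()) {
          cachedUsedBytes = computeUsedBytes(); // e.g. walk the data directory ("du")
          try {
            Thread.sleep(refreshIntervalMs);
          } catch (InterruptedException e) {
            // Corresponds to "Thread Interrupted waiting to refresh disk information":
            // the interrupt during sleep is the signal to stop refreshing.
            Thread.currentThread().interrupt();
            return;
          }
        }
      }

      private long computeUsedBytes() {
        return 0L; // placeholder; a real implementation would scan the block-pool dir
      }
    }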
2024-12-08T00:49:30,979 WARN [BP-2043044047-172.17.0.2-1733618934874 heartbeating to localhost/127.0.0.1:37255 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2043044047-172.17.0.2-1733618934874 (Datanode Uuid 79655df2-a483-4591-9bcf-bcb8d8bbb0fa) service to localhost/127.0.0.1:37255 2024-12-08T00:49:30,979 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:49:30,979 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/cluster_c1909289-147f-3345-59fd-781f6482af86/data/data1/current/BP-2043044047-172.17.0.2-1733618934874 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:49:30,980 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/cluster_c1909289-147f-3345-59fd-781f6482af86/data/data2/current/BP-2043044047-172.17.0.2-1733618934874 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:49:30,980 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:49:30,985 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@36ee8469{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T00:49:30,985 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@41098146{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:49:30,985 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:49:30,986 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@38bb274c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:49:30,986 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2aa6e3f6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/hadoop.log.dir/,STOPPED} 2024-12-08T00:49:30,992 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T00:49:31,011 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T00:49:31,017 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=182 (was 157) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37255 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:37255 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:37255 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37255 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:37255 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:37255 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37255 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37255 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 450) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=14 (was 26), ProcessCount=11 (was 11), AvailableMemoryMB=17326 (was 17477) 2024-12-08T00:49:31,024 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=182, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=14, ProcessCount=11, AvailableMemoryMB=17325 2024-12-08T00:49:31,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T00:49:31,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/hadoop.log.dir so I do NOT create it in target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c 2024-12-08T00:49:31,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/982cec9f-9f6b-3bb2-8eb0-96cfa4313788/hadoop.tmp.dir so I do NOT create it in target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c 2024-12-08T00:49:31,024 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/cluster_190049d3-13ea-7573-72c2-be467c979f0f, deleteOnExit=true 2024-12-08T00:49:31,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-08T00:49:31,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/test.cache.data in system properties and HBase conf 2024-12-08T00:49:31,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T00:49:31,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/hadoop.log.dir in system properties and HBase conf 2024-12-08T00:49:31,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T00:49:31,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T00:49:31,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T00:49:31,025 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-12-08T00:49:31,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T00:49:31,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T00:49:31,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T00:49:31,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T00:49:31,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T00:49:31,026 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T00:49:31,026 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T00:49:31,026 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T00:49:31,026 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T00:49:31,026 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/nfs.dump.dir in system properties and HBase conf 2024-12-08T00:49:31,026 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/java.io.tmpdir in system properties and HBase conf 2024-12-08T00:49:31,026 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T00:49:31,026 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T00:49:31,026 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T00:49:31,037 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T00:49:31,302 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:49:31,305 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:49:31,307 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:49:31,307 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:49:31,307 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T00:49:31,307 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:49:31,311 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3804bd01{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:49:31,311 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ca05505{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:49:31,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:31,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:31,400 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1aa3c43{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/java.io.tmpdir/jetty-localhost-32939-hadoop-hdfs-3_4_1-tests_jar-_-any-11919235245145357459/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T00:49:31,400 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@504cda16{HTTP/1.1, (http/1.1)}{localhost:32939} 2024-12-08T00:49:31,400 INFO [Time-limited test {}] server.Server(415): Started @190830ms 2024-12-08T00:49:31,410 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T00:49:31,617 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:49:31,622 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:49:31,623 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:49:31,623 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:49:31,623 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T00:49:31,623 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@460100b8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:49:31,624 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@33ab71f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:49:31,714 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@15cc44b6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/java.io.tmpdir/jetty-localhost-34485-hadoop-hdfs-3_4_1-tests_jar-_-any-9847482297231043701/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:49:31,714 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@31ad985a{HTTP/1.1, (http/1.1)}{localhost:34485} 2024-12-08T00:49:31,714 INFO [Time-limited test {}] server.Server(415): Started @191144ms 2024-12-08T00:49:31,715 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T00:49:31,738 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:49:31,741 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:49:31,741 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:49:31,741 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:49:31,741 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T00:49:31,742 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b7ba33e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:49:31,742 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2635e80{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:49:31,833 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@433a8e25{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/java.io.tmpdir/jetty-localhost-36973-hadoop-hdfs-3_4_1-tests_jar-_-any-10925692322003349264/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:49:31,833 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@570a7045{HTTP/1.1, (http/1.1)}{localhost:36973} 2024-12-08T00:49:31,833 INFO [Time-limited test {}] server.Server(415): Started @191263ms 2024-12-08T00:49:31,834 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T00:49:32,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:32,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:49:32,506 WARN [Thread-1643 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/cluster_190049d3-13ea-7573-72c2-be467c979f0f/data/data1/current/BP-1390618451-172.17.0.2-1733618971046/current, will proceed with Du for space computation calculation, 2024-12-08T00:49:32,506 WARN [Thread-1644 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/cluster_190049d3-13ea-7573-72c2-be467c979f0f/data/data2/current/BP-1390618451-172.17.0.2-1733618971046/current, will proceed with Du for space computation calculation, 2024-12-08T00:49:32,522 WARN [Thread-1607 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T00:49:32,524 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf39dd8ddc13a2788 with lease ID 0x8595287c6b1b0eb7: Processing first storage report for DS-31523f94-b9de-49e4-bc85-478d5282dbb4 from datanode DatanodeRegistration(127.0.0.1:43933, datanodeUuid=f974d580-cfef-48c3-a483-6ed378ef3415, infoPort=45331, infoSecurePort=0, ipcPort=40455, storageInfo=lv=-57;cid=testClusterID;nsid=1837856264;c=1733618971046) 2024-12-08T00:49:32,524 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf39dd8ddc13a2788 with lease ID 0x8595287c6b1b0eb7: from storage DS-31523f94-b9de-49e4-bc85-478d5282dbb4 node DatanodeRegistration(127.0.0.1:43933, datanodeUuid=f974d580-cfef-48c3-a483-6ed378ef3415, infoPort=45331, infoSecurePort=0, ipcPort=40455, storageInfo=lv=-57;cid=testClusterID;nsid=1837856264;c=1733618971046), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:49:32,524 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf39dd8ddc13a2788 with lease ID 0x8595287c6b1b0eb7: Processing first storage report for DS-61a2f2a7-ca0d-4b4e-b93e-bf1044350eba from datanode DatanodeRegistration(127.0.0.1:43933, datanodeUuid=f974d580-cfef-48c3-a483-6ed378ef3415, infoPort=45331, infoSecurePort=0, ipcPort=40455, storageInfo=lv=-57;cid=testClusterID;nsid=1837856264;c=1733618971046) 2024-12-08T00:49:32,525 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf39dd8ddc13a2788 with lease ID 0x8595287c6b1b0eb7: from storage DS-61a2f2a7-ca0d-4b4e-b93e-bf1044350eba node DatanodeRegistration(127.0.0.1:43933, datanodeUuid=f974d580-cfef-48c3-a483-6ed378ef3415, infoPort=45331, infoSecurePort=0, ipcPort=40455, storageInfo=lv=-57;cid=testClusterID;nsid=1837856264;c=1733618971046), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:49:32,620 WARN [Thread-1654 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/cluster_190049d3-13ea-7573-72c2-be467c979f0f/data/data3/current/BP-1390618451-172.17.0.2-1733618971046/current, will proceed with Du for space computation calculation, 2024-12-08T00:49:32,620 WARN [Thread-1655 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/cluster_190049d3-13ea-7573-72c2-be467c979f0f/data/data4/current/BP-1390618451-172.17.0.2-1733618971046/current, will proceed with Du for space computation calculation, 2024-12-08T00:49:32,637 WARN [Thread-1630 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T00:49:32,639 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x48c523acc8c234d with lease ID 0x8595287c6b1b0eb8: Processing first storage report for DS-82db35bf-24f1-4828-bc46-45285960ae24 from datanode DatanodeRegistration(127.0.0.1:37845, datanodeUuid=52f0eeca-55bd-4cb1-8775-ea5a074d580f, infoPort=46007, infoSecurePort=0, ipcPort=45111, storageInfo=lv=-57;cid=testClusterID;nsid=1837856264;c=1733618971046) 2024-12-08T00:49:32,639 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x48c523acc8c234d with lease ID 0x8595287c6b1b0eb8: from storage DS-82db35bf-24f1-4828-bc46-45285960ae24 node DatanodeRegistration(127.0.0.1:37845, datanodeUuid=52f0eeca-55bd-4cb1-8775-ea5a074d580f, infoPort=46007, infoSecurePort=0, ipcPort=45111, storageInfo=lv=-57;cid=testClusterID;nsid=1837856264;c=1733618971046), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:49:32,639 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x48c523acc8c234d with lease ID 0x8595287c6b1b0eb8: Processing first storage report for DS-16d900a2-03b2-479e-84e4-36109a3044ce from datanode DatanodeRegistration(127.0.0.1:37845, datanodeUuid=52f0eeca-55bd-4cb1-8775-ea5a074d580f, infoPort=46007, infoSecurePort=0, ipcPort=45111, storageInfo=lv=-57;cid=testClusterID;nsid=1837856264;c=1733618971046) 2024-12-08T00:49:32,639 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x48c523acc8c234d with lease ID 0x8595287c6b1b0eb8: from storage DS-16d900a2-03b2-479e-84e4-36109a3044ce node DatanodeRegistration(127.0.0.1:37845, datanodeUuid=52f0eeca-55bd-4cb1-8775-ea5a074d580f, infoPort=46007, infoSecurePort=0, ipcPort=45111, storageInfo=lv=-57;cid=testClusterID;nsid=1837856264;c=1733618971046), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:49:32,658 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c 2024-12-08T00:49:32,661 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/cluster_190049d3-13ea-7573-72c2-be467c979f0f/zookeeper_0, clientPort=62505, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/cluster_190049d3-13ea-7573-72c2-be467c979f0f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/cluster_190049d3-13ea-7573-72c2-be467c979f0f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T00:49:32,662 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62505 2024-12-08T00:49:32,662 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:49:32,663 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:49:32,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741825_1001 (size=7) 2024-12-08T00:49:32,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741825_1001 (size=7) 2024-12-08T00:49:32,677 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0 with version=8 2024-12-08T00:49:32,677 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/hbase-staging 2024-12-08T00:49:32,679 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0f983e3e5be1:0 server-side Connection retries=45 2024-12-08T00:49:32,679 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:49:32,679 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T00:49:32,679 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T00:49:32,679 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:49:32,679 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T00:49:32,679 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T00:49:32,679 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T00:49:32,680 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42913 2024-12-08T00:49:32,682 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42913 connecting to ZooKeeper ensemble=127.0.0.1:62505 2024-12-08T00:49:32,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:429130x0, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-12-08T00:49:32,734 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42913-0x10002f3f6900000 connected 2024-12-08T00:49:32,802 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:49:32,806 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:49:32,809 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:49:32,809 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0, hbase.cluster.distributed=false 2024-12-08T00:49:32,812 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T00:49:32,812 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42913 2024-12-08T00:49:32,812 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42913 2024-12-08T00:49:32,813 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42913 2024-12-08T00:49:32,813 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42913 2024-12-08T00:49:32,814 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42913 2024-12-08T00:49:32,831 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0f983e3e5be1:0 server-side Connection retries=45 2024-12-08T00:49:32,831 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:49:32,831 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T00:49:32,832 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T00:49:32,832 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:49:32,832 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T00:49:32,832 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T00:49:32,832 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T00:49:32,832 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35097 2024-12-08T00:49:32,833 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35097 connecting to ZooKeeper ensemble=127.0.0.1:62505 2024-12-08T00:49:32,834 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:49:32,835 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:49:32,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:350970x0, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T00:49:32,843 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35097-0x10002f3f6900001 connected 2024-12-08T00:49:32,843 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35097-0x10002f3f6900001, quorum=127.0.0.1:62505, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:49:32,843 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T00:49:32,844 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T00:49:32,844 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35097-0x10002f3f6900001, quorum=127.0.0.1:62505, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T00:49:32,845 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35097-0x10002f3f6900001, quorum=127.0.0.1:62505, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T00:49:32,845 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35097 2024-12-08T00:49:32,845 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35097 2024-12-08T00:49:32,847 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35097 2024-12-08T00:49:32,847 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35097 2024-12-08T00:49:32,847 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35097 2024-12-08T00:49:32,858 DEBUG [M:0;0f983e3e5be1:42913 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0f983e3e5be1:42913 2024-12-08T00:49:32,858 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0f983e3e5be1,42913,1733618972678 2024-12-08T00:49:32,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:49:32,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35097-0x10002f3f6900001, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:49:32,868 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0f983e3e5be1,42913,1733618972678 2024-12-08T00:49:32,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35097-0x10002f3f6900001, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T00:49:32,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:49:32,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35097-0x10002f3f6900001, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:49:32,876 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T00:49:32,877 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0f983e3e5be1,42913,1733618972678 from backup master directory 2024-12-08T00:49:32,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35097-0x10002f3f6900001, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:49:32,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0f983e3e5be1,42913,1733618972678 2024-12-08T00:49:32,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:49:32,884 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-08T00:49:32,885 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0f983e3e5be1,42913,1733618972678 2024-12-08T00:49:32,891 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/hbase.id] with ID: 2d6eaef1-ea9e-4c8d-ae02-7a706e4bfc17 2024-12-08T00:49:32,891 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/.tmp/hbase.id 2024-12-08T00:49:32,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741826_1002 (size=42) 2024-12-08T00:49:32,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741826_1002 (size=42) 2024-12-08T00:49:32,900 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/.tmp/hbase.id]:[hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/hbase.id] 2024-12-08T00:49:32,915 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:49:32,915 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-08T00:49:32,916 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-08T00:49:32,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:49:32,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35097-0x10002f3f6900001, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:49:32,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741827_1003 (size=196) 2024-12-08T00:49:32,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741827_1003 (size=196) 2024-12-08T00:49:32,933 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T00:49:32,934 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T00:49:32,934 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:49:32,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741828_1004 (size=1189) 2024-12-08T00:49:32,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741828_1004 (size=1189) 2024-12-08T00:49:32,945 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/data/master/store 2024-12-08T00:49:32,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741829_1005 (size=34) 2024-12-08T00:49:32,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741829_1005 (size=34) 2024-12-08T00:49:32,952 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:49:32,952 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T00:49:32,952 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:49:32,952 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:49:32,952 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T00:49:32,952 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:49:32,952 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T00:49:32,952 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733618972952Disabling compacts and flushes for region at 1733618972952Disabling writes for close at 1733618972952Writing region close event to WAL at 1733618972952Closed at 1733618972952 2024-12-08T00:49:32,953 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/data/master/store/.initializing 2024-12-08T00:49:32,953 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/WALs/0f983e3e5be1,42913,1733618972678 2024-12-08T00:49:32,955 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C42913%2C1733618972678, suffix=, logDir=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/WALs/0f983e3e5be1,42913,1733618972678, archiveDir=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/oldWALs, maxLogs=10 2024-12-08T00:49:32,956 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C42913%2C1733618972678.1733618972956 2024-12-08T00:49:32,960 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/WALs/0f983e3e5be1,42913,1733618972678/0f983e3e5be1%2C42913%2C1733618972678.1733618972956 2024-12-08T00:49:32,961 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45331:45331),(127.0.0.1/127.0.0.1:46007:46007)] 2024-12-08T00:49:32,961 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:49:32,962 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:49:32,962 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:49:32,962 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:49:32,963 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:49:32,964 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T00:49:32,964 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:49:32,964 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:49:32,965 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:49:32,965 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T00:49:32,965 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:49:32,966 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:49:32,966 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:49:32,967 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T00:49:32,967 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:49:32,967 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:49:32,967 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:49:32,969 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T00:49:32,969 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:49:32,969 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:49:32,969 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:49:32,970 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:49:32,970 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:49:32,972 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:49:32,972 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:49:32,973 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T00:49:32,975 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:49:32,977 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:49:32,978 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=782597, jitterRate=-0.004877328872680664}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T00:49:32,978 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733618972962Initializing all the Stores at 1733618972962Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618972962Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618972963 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618972963Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618972963Cleaning up temporary data from old regions at 1733618972972 (+9 ms)Region opened successfully at 1733618972978 (+6 ms) 2024-12-08T00:49:32,979 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T00:49:32,983 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c4dafff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0f983e3e5be1/172.17.0.2:0 2024-12-08T00:49:32,984 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T00:49:32,984 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T00:49:32,984 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T00:49:32,984 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T00:49:32,985 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-08T00:49:32,985 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-08T00:49:32,985 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T00:49:32,988 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T00:49:32,988 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T00:49:33,001 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T00:49:33,001 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T00:49:33,002 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T00:49:33,009 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T00:49:33,010 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T00:49:33,011 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T00:49:33,018 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T00:49:33,019 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T00:49:33,026 DEBUG 
[master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T00:49:33,029 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T00:49:33,034 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T00:49:33,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35097-0x10002f3f6900001, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T00:49:33,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T00:49:33,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35097-0x10002f3f6900001, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:49:33,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:49:33,044 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0f983e3e5be1,42913,1733618972678, sessionid=0x10002f3f6900000, setting cluster-up flag (Was=false) 2024-12-08T00:49:33,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:49:33,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35097-0x10002f3f6900001, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:49:33,084 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T00:49:33,085 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0f983e3e5be1,42913,1733618972678 2024-12-08T00:49:33,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35097-0x10002f3f6900001, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:49:33,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:49:33,126 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T00:49:33,129 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0f983e3e5be1,42913,1733618972678 2024-12-08T00:49:33,132 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T00:49:33,136 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T00:49:33,136 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T00:49:33,137 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-08T00:49:33,138 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0f983e3e5be1,42913,1733618972678 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T00:49:33,139 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:49:33,139 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:49:33,140 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:49:33,140 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:49:33,140 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0f983e3e5be1:0, corePoolSize=10, maxPoolSize=10 2024-12-08T00:49:33,140 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:49:33,140 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=2, maxPoolSize=2 2024-12-08T00:49:33,140 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=1, 
maxPoolSize=1 2024-12-08T00:49:33,140 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733619003140 2024-12-08T00:49:33,140 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T00:49:33,141 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T00:49:33,141 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T00:49:33,141 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T00:49:33,141 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T00:49:33,141 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T00:49:33,141 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T00:49:33,141 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T00:49:33,141 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T00:49:33,141 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:49:33,141 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T00:49:33,141 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-08T00:49:33,142 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T00:49:33,142 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T00:49:33,142 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733618973142,5,FailOnTimeoutGroup] 2024-12-08T00:49:33,142 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733618973142,5,FailOnTimeoutGroup] 2024-12-08T00:49:33,142 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T00:49:33,142 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T00:49:33,142 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T00:49:33,142 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-08T00:49:33,143 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:49:33,143 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T00:49:33,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741831_1007 (size=1321) 2024-12-08T00:49:33,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741831_1007 (size=1321) 2024-12-08T00:49:33,149 INFO [RS:0;0f983e3e5be1:35097 {}] regionserver.HRegionServer(746): ClusterId : 2d6eaef1-ea9e-4c8d-ae02-7a706e4bfc17 2024-12-08T00:49:33,149 DEBUG [RS:0;0f983e3e5be1:35097 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T00:49:33,150 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T00:49:33,150 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0 2024-12-08T00:49:33,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741832_1008 (size=32) 2024-12-08T00:49:33,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741832_1008 (size=32) 2024-12-08T00:49:33,156 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:49:33,158 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T00:49:33,159 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T00:49:33,159 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:49:33,159 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:49:33,159 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T00:49:33,160 DEBUG [RS:0;0f983e3e5be1:35097 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T00:49:33,160 DEBUG [RS:0;0f983e3e5be1:35097 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T00:49:33,161 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T00:49:33,161 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:49:33,161 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:49:33,161 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T00:49:33,162 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T00:49:33,162 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:49:33,163 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:49:33,163 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T00:49:33,164 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T00:49:33,164 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:49:33,164 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:49:33,165 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T00:49:33,165 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/hbase/meta/1588230740 2024-12-08T00:49:33,165 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/hbase/meta/1588230740 2024-12-08T00:49:33,167 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T00:49:33,167 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T00:49:33,167 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
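The FlushLargeStoresPolicy line above shows the fallback path: because the hbase:meta descriptor does not set hbase.hregion.percolumnfamilyflush.size.lower.bound, the per-family flush lower bound is derived as the region's memstore flush heap size divided by the number of column families (reported as 16.0 M here). A minimal, hypothetical sketch of setting that table-level property explicitly on a user table, assuming the standard TableDescriptorBuilder client API (the property key itself is the one named in the log message; the table name and 16 MB value are illustrative):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class PerFamilyFlushBoundSketch {
      public static TableDescriptor build() {
        // "example_table" is illustrative, not taken from this log.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            // Flush an individual family only once it reaches 16 MB instead of
            // relying on the derived flushSize / numberOfFamilies default.
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16 * 1024 * 1024))
            .build();
      }
    }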
2024-12-08T00:49:33,168 DEBUG [RS:0;0f983e3e5be1:35097 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T00:49:33,169 DEBUG [RS:0;0f983e3e5be1:35097 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14dc4e0f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0f983e3e5be1/172.17.0.2:0 2024-12-08T00:49:33,169 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T00:49:33,171 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:49:33,172 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=783612, jitterRate=-0.00358600914478302}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T00:49:33,172 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733618973156Initializing all the Stores at 1733618973157 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618973157Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618973157Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618973157Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618973157Cleaning up temporary data from old regions at 1733618973167 (+10 ms)Region opened successfully at 1733618973172 (+5 ms) 2024-12-08T00:49:33,172 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T00:49:33,172 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T00:49:33,173 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T00:49:33,173 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 
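The "Opened 1588230740" line above reports SteppingSplitPolicy over ConstantSizeRegionSplitPolicy with desiredMaxFileSize=783612 and jitterRate=-0.00358600914478302. That is consistent with the configured hbase.hregion.max.filesize of 786432 (the value also quoted by the TableDescriptorChecker warning later in this log) scaled by (1 + jitterRate): 786432 × (1 − 0.0035860091…) ≈ 783612. The second open of the same region on the region server further down shows the same relationship with a positive jitter: 786432 × (1 + 0.0186141729…) ≈ 801070.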
2024-12-08T00:49:33,173 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T00:49:33,173 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T00:49:33,173 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733618973172Disabling compacts and flushes for region at 1733618973172Disabling writes for close at 1733618973173 (+1 ms)Writing region close event to WAL at 1733618973173Closed at 1733618973173 2024-12-08T00:49:33,174 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:49:33,174 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T00:49:33,175 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T00:49:33,176 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T00:49:33,177 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T00:49:33,183 DEBUG [RS:0;0f983e3e5be1:35097 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0f983e3e5be1:35097 2024-12-08T00:49:33,183 INFO [RS:0;0f983e3e5be1:35097 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T00:49:33,183 INFO [RS:0;0f983e3e5be1:35097 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T00:49:33,183 DEBUG [RS:0;0f983e3e5be1:35097 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-08T00:49:33,184 INFO [RS:0;0f983e3e5be1:35097 {}] regionserver.HRegionServer(2659): reportForDuty to master=0f983e3e5be1,42913,1733618972678 with port=35097, startcode=1733618972831 2024-12-08T00:49:33,184 DEBUG [RS:0;0f983e3e5be1:35097 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T00:49:33,185 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45073, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T00:49:33,186 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42913 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0f983e3e5be1,35097,1733618972831 2024-12-08T00:49:33,186 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42913 {}] master.ServerManager(517): Registering regionserver=0f983e3e5be1,35097,1733618972831 2024-12-08T00:49:33,187 DEBUG [RS:0;0f983e3e5be1:35097 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0 2024-12-08T00:49:33,187 DEBUG [RS:0;0f983e3e5be1:35097 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46567 2024-12-08T00:49:33,187 DEBUG [RS:0;0f983e3e5be1:35097 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T00:49:33,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T00:49:33,193 DEBUG [RS:0;0f983e3e5be1:35097 {}] zookeeper.ZKUtil(111): regionserver:35097-0x10002f3f6900001, quorum=127.0.0.1:62505, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0f983e3e5be1,35097,1733618972831 2024-12-08T00:49:33,193 WARN [RS:0;0f983e3e5be1:35097 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T00:49:33,193 INFO [RS:0;0f983e3e5be1:35097 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:49:33,193 DEBUG [RS:0;0f983e3e5be1:35097 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/WALs/0f983e3e5be1,35097,1733618972831 2024-12-08T00:49:33,193 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0f983e3e5be1,35097,1733618972831] 2024-12-08T00:49:33,197 INFO [RS:0;0f983e3e5be1:35097 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T00:49:33,198 INFO [RS:0;0f983e3e5be1:35097 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T00:49:33,198 INFO [RS:0;0f983e3e5be1:35097 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T00:49:33,199 INFO [RS:0;0f983e3e5be1:35097 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
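The MemStoreFlusher line above (globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M) follows the usual derivation: the global limit is a fraction of the region server heap, and the low-water mark defaults to 95% of that limit, which matches 880 M × 0.95 = 836 M. A minimal sketch of tuning those two values, assuming the standard configuration keys (the 0.4 heap fraction is illustrative, not taken from this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the region server heap shared by all memstores.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Once the global limit is hit, flushing continues down to this fraction
        // of it (0.95 reproduces the 880 M -> 836 M low mark seen above).
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        return conf;
      }
    }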
2024-12-08T00:49:33,199 INFO [RS:0;0f983e3e5be1:35097 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T00:49:33,200 INFO [RS:0;0f983e3e5be1:35097 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T00:49:33,200 INFO [RS:0;0f983e3e5be1:35097 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T00:49:33,200 DEBUG [RS:0;0f983e3e5be1:35097 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:49:33,200 DEBUG [RS:0;0f983e3e5be1:35097 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:49:33,200 DEBUG [RS:0;0f983e3e5be1:35097 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:49:33,200 DEBUG [RS:0;0f983e3e5be1:35097 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:49:33,200 DEBUG [RS:0;0f983e3e5be1:35097 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:49:33,200 DEBUG [RS:0;0f983e3e5be1:35097 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0f983e3e5be1:0, corePoolSize=2, maxPoolSize=2 2024-12-08T00:49:33,200 DEBUG [RS:0;0f983e3e5be1:35097 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:49:33,200 DEBUG [RS:0;0f983e3e5be1:35097 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:49:33,200 DEBUG [RS:0;0f983e3e5be1:35097 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:49:33,200 DEBUG [RS:0;0f983e3e5be1:35097 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:49:33,200 DEBUG [RS:0;0f983e3e5be1:35097 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:49:33,200 DEBUG [RS:0;0f983e3e5be1:35097 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:49:33,200 DEBUG [RS:0;0f983e3e5be1:35097 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:49:33,200 DEBUG [RS:0;0f983e3e5be1:35097 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:49:33,201 INFO [RS:0;0f983e3e5be1:35097 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
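Each executor service started above gets a fixed thread pool (for example RS_OPEN_REGION with corePoolSize=1, maxPoolSize=1 in this mini-cluster). Those pool sizes are configuration-driven; the keys in the sketch below are assumptions from memory and should be verified against the HBase version in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ExecutorPoolSketch {
      public static Configuration sized() {
        Configuration conf = HBaseConfiguration.create();
        // Assumed keys (verify for your release): thread counts backing the
        // RS_OPEN_REGION and RS_CLOSE_REGION executor services logged above.
        conf.setInt("hbase.regionserver.executor.openregion.threads", 1);
        conf.setInt("hbase.regionserver.executor.closeregion.threads", 1);
        return conf;
      }
    }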
2024-12-08T00:49:33,201 INFO [RS:0;0f983e3e5be1:35097 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T00:49:33,201 INFO [RS:0;0f983e3e5be1:35097 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:49:33,201 INFO [RS:0;0f983e3e5be1:35097 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T00:49:33,201 INFO [RS:0;0f983e3e5be1:35097 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T00:49:33,201 INFO [RS:0;0f983e3e5be1:35097 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,35097,1733618972831-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T00:49:33,215 INFO [RS:0;0f983e3e5be1:35097 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T00:49:33,215 INFO [RS:0;0f983e3e5be1:35097 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,35097,1733618972831-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:49:33,215 INFO [RS:0;0f983e3e5be1:35097 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:49:33,215 INFO [RS:0;0f983e3e5be1:35097 {}] regionserver.Replication(171): 0f983e3e5be1,35097,1733618972831 started 2024-12-08T00:49:33,227 INFO [RS:0;0f983e3e5be1:35097 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:49:33,227 INFO [RS:0;0f983e3e5be1:35097 {}] regionserver.HRegionServer(1482): Serving as 0f983e3e5be1,35097,1733618972831, RpcServer on 0f983e3e5be1/172.17.0.2:35097, sessionid=0x10002f3f6900001 2024-12-08T00:49:33,227 DEBUG [RS:0;0f983e3e5be1:35097 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T00:49:33,227 DEBUG [RS:0;0f983e3e5be1:35097 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0f983e3e5be1,35097,1733618972831 2024-12-08T00:49:33,227 DEBUG [RS:0;0f983e3e5be1:35097 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,35097,1733618972831' 2024-12-08T00:49:33,227 DEBUG [RS:0;0f983e3e5be1:35097 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T00:49:33,228 DEBUG [RS:0;0f983e3e5be1:35097 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T00:49:33,228 DEBUG [RS:0;0f983e3e5be1:35097 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T00:49:33,228 DEBUG [RS:0;0f983e3e5be1:35097 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T00:49:33,228 DEBUG [RS:0;0f983e3e5be1:35097 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0f983e3e5be1,35097,1733618972831 2024-12-08T00:49:33,228 DEBUG [RS:0;0f983e3e5be1:35097 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,35097,1733618972831' 2024-12-08T00:49:33,228 DEBUG [RS:0;0f983e3e5be1:35097 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T00:49:33,229 DEBUG 
[RS:0;0f983e3e5be1:35097 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T00:49:33,229 DEBUG [RS:0;0f983e3e5be1:35097 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T00:49:33,229 INFO [RS:0;0f983e3e5be1:35097 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T00:49:33,229 INFO [RS:0;0f983e3e5be1:35097 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T00:49:33,328 WARN [0f983e3e5be1:42913 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-08T00:49:33,332 INFO [RS:0;0f983e3e5be1:35097 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C35097%2C1733618972831, suffix=, logDir=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/WALs/0f983e3e5be1,35097,1733618972831, archiveDir=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/oldWALs, maxLogs=32 2024-12-08T00:49:33,332 INFO [RS:0;0f983e3e5be1:35097 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C35097%2C1733618972831.1733618973332 2024-12-08T00:49:33,338 INFO [RS:0;0f983e3e5be1:35097 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/WALs/0f983e3e5be1,35097,1733618972831/0f983e3e5be1%2C35097%2C1733618972831.1733618973332 2024-12-08T00:49:33,340 DEBUG [RS:0;0f983e3e5be1:35097 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46007:46007),(127.0.0.1/127.0.0.1:45331:45331)] 2024-12-08T00:49:33,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:33,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:49:33,578 DEBUG [0f983e3e5be1:42913 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-08T00:49:33,579 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0f983e3e5be1,35097,1733618972831 2024-12-08T00:49:33,581 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0f983e3e5be1,35097,1733618972831, state=OPENING 2024-12-08T00:49:33,626 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T00:49:33,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:49:33,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35097-0x10002f3f6900001, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:49:33,636 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T00:49:33,636 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:49:33,636 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:49:33,637 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0f983e3e5be1,35097,1733618972831}] 2024-12-08T00:49:33,792 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T00:49:33,797 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42925, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T00:49:33,804 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-08T00:49:33,805 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:49:33,808 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C35097%2C1733618972831.meta, suffix=.meta, logDir=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/WALs/0f983e3e5be1,35097,1733618972831, archiveDir=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/oldWALs, maxLogs=32 2024-12-08T00:49:33,808 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C35097%2C1733618972831.meta.1733618973808.meta 2024-12-08T00:49:33,816 INFO 
[RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/WALs/0f983e3e5be1,35097,1733618972831/0f983e3e5be1%2C35097%2C1733618972831.meta.1733618973808.meta 2024-12-08T00:49:33,817 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46007:46007),(127.0.0.1/127.0.0.1:45331:45331)] 2024-12-08T00:49:33,818 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:49:33,818 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T00:49:33,819 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T00:49:33,819 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-08T00:49:33,819 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T00:49:33,819 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:49:33,819 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-08T00:49:33,819 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-08T00:49:33,821 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T00:49:33,822 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T00:49:33,822 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:49:33,822 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:49:33,822 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T00:49:33,823 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T00:49:33,823 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:49:33,824 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:49:33,824 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T00:49:33,825 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T00:49:33,825 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:49:33,825 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:49:33,825 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T00:49:33,826 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T00:49:33,826 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:49:33,827 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:49:33,827 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T00:49:33,828 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/hbase/meta/1588230740 2024-12-08T00:49:33,829 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/hbase/meta/1588230740 2024-12-08T00:49:33,830 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T00:49:33,830 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T00:49:33,831 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-08T00:49:33,833 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T00:49:33,833 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=801070, jitterRate=0.01861417293548584}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T00:49:33,834 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-08T00:49:33,834 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733618973819Writing region info on filesystem at 1733618973819Initializing all the Stores at 1733618973820 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618973820Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618973820Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618973820Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618973820Cleaning up temporary data from old regions at 1733618973830 (+10 ms)Running coprocessor post-open hooks at 1733618973834 (+4 ms)Region opened successfully at 1733618973834 2024-12-08T00:49:33,836 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733618973791 2024-12-08T00:49:33,838 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T00:49:33,838 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-08T00:49:33,839 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=0f983e3e5be1,35097,1733618972831 2024-12-08T00:49:33,841 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0f983e3e5be1,35097,1733618972831, state=OPEN 2024-12-08T00:49:33,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35097-0x10002f3f6900001, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T00:49:33,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T00:49:33,883 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0f983e3e5be1,35097,1733618972831 2024-12-08T00:49:33,883 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:49:33,883 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:49:33,890 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T00:49:33,890 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0f983e3e5be1,35097,1733618972831 in 247 msec 2024-12-08T00:49:33,895 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T00:49:33,895 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 715 msec 2024-12-08T00:49:33,897 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:49:33,897 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-08T00:49:33,898 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T00:49:33,898 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0f983e3e5be1,35097,1733618972831, seqNum=-1] 2024-12-08T00:49:33,898 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:49:33,900 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55489, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:49:33,905 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 770 msec 2024-12-08T00:49:33,905 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733618973905, completionTime=-1 2024-12-08T00:49:33,905 INFO 
[master/0f983e3e5be1:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-08T00:49:33,905 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-08T00:49:33,907 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-08T00:49:33,907 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733619033907 2024-12-08T00:49:33,907 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733619093907 2024-12-08T00:49:33,907 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-08T00:49:33,907 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,42913,1733618972678-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:49:33,907 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,42913,1733618972678-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:49:33,907 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,42913,1733618972678-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:49:33,907 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0f983e3e5be1:42913, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:49:33,907 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T00:49:33,908 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-08T00:49:33,909 DEBUG [master/0f983e3e5be1:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T00:49:33,910 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.025sec 2024-12-08T00:49:33,910 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T00:49:33,910 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T00:49:33,910 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T00:49:33,910 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
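Several optional master-side features are reported as disabled above: quota support, slow/large request logging to hbase:slowlog, the WAL event tracker table and the replication sink tracker table. Quota support, for instance, hinges on a single boolean switch; a minimal sketch assuming the standard hbase.quota.enabled key:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaSwitchSketch {
      public static Configuration withQuotas() {
        Configuration conf = HBaseConfiguration.create();
        // Left at its default (false), MasterQuotaManager and the region server
        // quota managers log "Quota support disabled" exactly as above.
        conf.setBoolean("hbase.quota.enabled", true);
        return conf;
      }
    }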
2024-12-08T00:49:33,911 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T00:49:33,911 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,42913,1733618972678-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T00:49:33,911 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,42913,1733618972678-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T00:49:33,913 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T00:49:33,913 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T00:49:33,913 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,42913,1733618972678-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:49:33,950 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@640d397b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:49:33,950 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0f983e3e5be1,42913,-1 for getting cluster id 2024-12-08T00:49:33,950 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T00:49:33,953 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2d6eaef1-ea9e-4c8d-ae02-7a706e4bfc17' 2024-12-08T00:49:33,953 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T00:49:33,953 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2d6eaef1-ea9e-4c8d-ae02-7a706e4bfc17" 2024-12-08T00:49:33,954 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a208723, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:49:33,954 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0f983e3e5be1,42913,-1] 2024-12-08T00:49:33,954 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T00:49:33,954 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:49:33,956 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42540, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T00:49:33,957 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ff1c67d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:49:33,958 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T00:49:33,959 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0f983e3e5be1,35097,1733618972831, seqNum=-1] 2024-12-08T00:49:33,960 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:49:33,961 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59494, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:49:33,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0f983e3e5be1,42913,1733618972678 2024-12-08T00:49:33,964 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:49:33,967 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-08T00:49:33,968 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-08T00:49:33,969 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 0f983e3e5be1,42913,1733618972678 2024-12-08T00:49:33,969 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@59c80080 2024-12-08T00:49:33,969 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T00:49:33,970 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42552, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T00:49:33,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-08T00:49:33,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-08T00:49:33,971 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T00:49:33,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-08T00:49:33,975 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T00:49:33,975 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:49:33,975 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-12-08T00:49:33,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T00:49:33,976 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T00:49:33,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741835_1011 (size=405) 2024-12-08T00:49:33,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741835_1011 (size=405) 2024-12-08T00:49:33,988 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 662bfac0cae17973cd76f23e3d607222, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0 2024-12-08T00:49:33,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741836_1012 (size=88) 2024-12-08T00:49:33,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:37845 is added to blk_1073741836_1012 (size=88) 2024-12-08T00:49:33,997 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:49:33,997 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 662bfac0cae17973cd76f23e3d607222, disabling compactions & flushes 2024-12-08T00:49:33,997 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222. 2024-12-08T00:49:33,997 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222. 2024-12-08T00:49:33,997 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222. after waiting 0 ms 2024-12-08T00:49:33,997 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222. 2024-12-08T00:49:33,997 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222. 2024-12-08T00:49:33,997 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 662bfac0cae17973cd76f23e3d607222: Waiting for close lock at 1733618973997Disabling compacts and flushes for region at 1733618973997Disabling writes for close at 1733618973997Writing region close event to WAL at 1733618973997Closed at 1733618973997 2024-12-08T00:49:33,998 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T00:49:33,999 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1733618973998"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733618973998"}]},"ts":"1733618973998"} 2024-12-08T00:49:34,001 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-08T00:49:34,002 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T00:49:34,003 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733618974002"}]},"ts":"1733618974002"} 2024-12-08T00:49:34,005 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-12-08T00:49:34,005 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=662bfac0cae17973cd76f23e3d607222, ASSIGN}] 2024-12-08T00:49:34,007 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=662bfac0cae17973cd76f23e3d607222, ASSIGN 2024-12-08T00:49:34,008 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=662bfac0cae17973cd76f23e3d607222, ASSIGN; state=OFFLINE, location=0f983e3e5be1,35097,1733618972831; forceNewPlan=false, retain=false 2024-12-08T00:49:34,159 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=662bfac0cae17973cd76f23e3d607222, regionState=OPENING, regionLocation=0f983e3e5be1,35097,1733618972831 2024-12-08T00:49:34,166 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=662bfac0cae17973cd76f23e3d607222, ASSIGN because future has completed 2024-12-08T00:49:34,167 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 662bfac0cae17973cd76f23e3d607222, server=0f983e3e5be1,35097,1733618972831}] 2024-12-08T00:49:34,332 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222. 
2024-12-08T00:49:34,332 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 662bfac0cae17973cd76f23e3d607222, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222.', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:49:34,333 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 662bfac0cae17973cd76f23e3d607222 2024-12-08T00:49:34,333 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:49:34,334 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 662bfac0cae17973cd76f23e3d607222 2024-12-08T00:49:34,334 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 662bfac0cae17973cd76f23e3d607222 2024-12-08T00:49:34,336 INFO [StoreOpener-662bfac0cae17973cd76f23e3d607222-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 662bfac0cae17973cd76f23e3d607222 2024-12-08T00:49:34,337 INFO [StoreOpener-662bfac0cae17973cd76f23e3d607222-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 662bfac0cae17973cd76f23e3d607222 columnFamilyName info 2024-12-08T00:49:34,337 DEBUG [StoreOpener-662bfac0cae17973cd76f23e3d607222-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:49:34,338 INFO [StoreOpener-662bfac0cae17973cd76f23e3d607222-1 {}] regionserver.HStore(327): Store=662bfac0cae17973cd76f23e3d607222/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:49:34,338 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 662bfac0cae17973cd76f23e3d607222 2024-12-08T00:49:34,338 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222 2024-12-08T00:49:34,339 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222 2024-12-08T00:49:34,339 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 662bfac0cae17973cd76f23e3d607222 2024-12-08T00:49:34,339 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 662bfac0cae17973cd76f23e3d607222 2024-12-08T00:49:34,341 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 662bfac0cae17973cd76f23e3d607222 2024-12-08T00:49:34,343 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:49:34,344 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 662bfac0cae17973cd76f23e3d607222; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=836382, jitterRate=0.06351549923419952}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T00:49:34,344 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 662bfac0cae17973cd76f23e3d607222 2024-12-08T00:49:34,344 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 662bfac0cae17973cd76f23e3d607222: Running coprocessor pre-open hook at 1733618974334Writing region info on filesystem at 1733618974334Initializing all the Stores at 1733618974335 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618974335Cleaning up temporary data from old regions at 1733618974339 (+4 ms)Running coprocessor post-open hooks at 1733618974344 (+5 ms)Region opened successfully at 1733618974344 2024-12-08T00:49:34,346 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222., pid=6, masterSystemTime=1733618974322 2024-12-08T00:49:34,348 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task 
for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222. 2024-12-08T00:49:34,348 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222. 2024-12-08T00:49:34,349 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=662bfac0cae17973cd76f23e3d607222, regionState=OPEN, openSeqNum=2, regionLocation=0f983e3e5be1,35097,1733618972831 2024-12-08T00:49:34,351 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 662bfac0cae17973cd76f23e3d607222, server=0f983e3e5be1,35097,1733618972831 because future has completed 2024-12-08T00:49:34,354 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-08T00:49:34,354 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 662bfac0cae17973cd76f23e3d607222, server=0f983e3e5be1,35097,1733618972831 in 185 msec 2024-12-08T00:49:34,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-12-08T00:49:34,357 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-08T00:49:34,357 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=662bfac0cae17973cd76f23e3d607222, ASSIGN in 349 msec 2024-12-08T00:49:34,357 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T00:49:34,358 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733618974357"}]},"ts":"1733618974357"} 2024-12-08T00:49:34,359 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-12-08T00:49:34,360 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T00:49:34,362 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 389 msec 2024-12-08T00:49:34,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:35,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:35,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:35,911 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-08T00:49:35,912 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-08T00:49:35,913 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T00:49:35,913 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-08T00:49:35,913 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-08T00:49:35,913 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-12-08T00:49:36,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:36,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:37,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:37,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:38,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:38,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:38,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:38,821 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:38,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:38,823 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:38,823 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:38,823 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:38,839 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:38,839 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:38,839 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:38,839 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:38,839 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:38,839 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:38,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:38,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:38,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:38,843 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:39,347 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T00:49:39,350 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:39,351 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:39,352 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:39,353 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:39,353 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:39,354 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:39,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:39,370 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:39,370 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:39,370 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:39,370 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:39,370 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:39,371 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:39,373 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:39,373 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:39,373 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:39,376 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:49:39,380 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-08T00:49:39,380 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-12-08T00:49:39,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:40,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:40,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:41,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:41,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:42,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:42,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:43,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:43,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:44,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T00:49:44,006 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-08T00:49:44,007 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-12-08T00:49:44,011 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-08T00:49:44,011 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222. 
2024-12-08T00:49:44,016 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222., hostname=0f983e3e5be1,35097,1733618972831, seqNum=2] 2024-12-08T00:49:44,024 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-08T00:49:44,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-08T00:49:44,029 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:49:44,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-08T00:49:44,030 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:49:44,031 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:49:44,199 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35097 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-08T00:49:44,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222. 
2024-12-08T00:49:44,200 INFO [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 662bfac0cae17973cd76f23e3d607222 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-08T00:49:44,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/.tmp/info/334d03fa90ea44eb84d7884a5ada2efb is 1080, key is row0001/info:/1733618984017/Put/seqid=0 2024-12-08T00:49:44,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741837_1013 (size=6033) 2024-12-08T00:49:44,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741837_1013 (size=6033) 2024-12-08T00:49:44,220 INFO [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/.tmp/info/334d03fa90ea44eb84d7884a5ada2efb 2024-12-08T00:49:44,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/.tmp/info/334d03fa90ea44eb84d7884a5ada2efb as hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/info/334d03fa90ea44eb84d7884a5ada2efb 2024-12-08T00:49:44,231 INFO [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/info/334d03fa90ea44eb84d7884a5ada2efb, entries=1, sequenceid=5, filesize=5.9 K 2024-12-08T00:49:44,232 INFO [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 662bfac0cae17973cd76f23e3d607222 in 32ms, sequenceid=5, compaction requested=false 2024-12-08T00:49:44,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 662bfac0cae17973cd76f23e3d607222: 2024-12-08T00:49:44,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222. 
2024-12-08T00:49:44,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-08T00:49:44,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-08T00:49:44,240 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-08T00:49:44,240 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 206 msec 2024-12-08T00:49:44,243 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 215 msec 2024-12-08T00:49:44,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:49:44,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:45,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:45,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:49:46,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:46,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:47,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:49:47,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:48,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:48,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:49:49,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:49,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:50,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:49:50,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:51,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:51,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:49:52,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:52,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:53,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:49:53,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-08T00:49:54,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-12-08T00:49:54,106 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-08T00:49:54,114 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-08T00:49:54,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-08T00:49:54,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-12-08T00:49:54,117 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-08T00:49:54,118 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-08T00:49:54,118 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-08T00:49:54,273 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35097 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-12-08T00:49:54,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222.
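The entries above show a client-requested flush of TestLogRolling-testCompactionRecordDoesntBlockRolling arriving at the master and being scheduled as a FlushTableProcedure (pid=9) with a single FlushRegionProcedure subprocedure (pid=10); the "Checking to see if procedure is done pid=..." lines are the client polling for completion. Purely as a hedged illustration (the class name and connection setup below are assumptions and not taken from this log; only the table name is), the client side of such a request with the standard HBase Admin API could look roughly like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical client-side sketch of the flush request logged above.
public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
    TableName table =
        TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The master schedules a flush procedure for the table and the client waits
      // for it, which is what the "Checking to see if procedure is done pid=..."
      // polling above corresponds to.
      admin.flush(table);
    }
  }
}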
2024-12-08T00:49:54,274 INFO [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 662bfac0cae17973cd76f23e3d607222 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-08T00:49:54,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/.tmp/info/95c6ac8595654e05864ac7f681ee7e26 is 1080, key is row0002/info:/1733618994109/Put/seqid=0
2024-12-08T00:49:54,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741838_1014 (size=6033)
2024-12-08T00:49:54,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741838_1014 (size=6033)
2024-12-08T00:49:54,293 INFO [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/.tmp/info/95c6ac8595654e05864ac7f681ee7e26
2024-12-08T00:49:54,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/.tmp/info/95c6ac8595654e05864ac7f681ee7e26 as hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/info/95c6ac8595654e05864ac7f681ee7e26
2024-12-08T00:49:54,308 INFO [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/info/95c6ac8595654e05864ac7f681ee7e26, entries=1, sequenceid=9, filesize=5.9 K
2024-12-08T00:49:54,309 INFO [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 662bfac0cae17973cd76f23e3d607222 in 35ms, sequenceid=9, compaction requested=false
2024-12-08T00:49:54,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 662bfac0cae17973cd76f23e3d607222:
2024-12-08T00:49:54,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222.
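In the flush above, the memstore is first written to a temporary HFile under the region's .tmp/info directory and then committed into the info store directory, where HStore reports it with entries=1, sequenceid=9, filesize=5.9 K. Purely as an illustrative sketch (the helper class is hypothetical; only the HDFS URI and directory layout are copied from the log), the committed store files could be listed with the plain Hadoop FileSystem API:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper: list the committed store files of the flushed region.
public class ListStoreFiles {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:46567"), new Configuration());
    // <root>/data/<namespace>/<table>/<region>/<column family>; flushed files are
    // moved here from the region's .tmp directory, as the HRegionFileSystem
    // "Committing ... as ..." entry above shows.
    Path storeDir = new Path("/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/"
        + "TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/info");
    for (FileStatus stat : fs.listStatus(storeDir)) {
      // For the flush above this would include 95c6ac8595654e05864ac7f681ee7e26 (6033 bytes).
      System.out.println(stat.getPath().getName() + "\t" + stat.getLen() + " bytes");
    }
  }
}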
2024-12-08T00:49:54,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-12-08T00:49:54,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-12-08T00:49:54,314 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-08T00:49:54,314 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 193 msec 2024-12-08T00:49:54,317 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 201 msec 2024-12-08T00:49:54,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:49:54,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:55,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:55,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 after 68076ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor191.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
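The attempt=2 warning above comes from the lease-recovery retry loop in RecoverLeaseFSUtils: it asks the NameNode to recover the lease on the old WAL file and then polls isFileClosed until the file is reported closed, and here every call fails with "Filesystem closed" because the DFSClient behind the FileSystem has already been shut down. Below is a minimal sketch of that recover-and-poll pattern using only the DistributedFileSystem calls named in the stack traces; the class name and the fixed sleep are assumptions, and the real utility adds reflection, timeouts and progress reporting.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Simplified sketch of the recoverLease / isFileClosed polling that
// RecoverLeaseFSUtils performs on the old WAL file named in the warnings above.
public class RecoverWalLease {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs = (DistributedFileSystem)
        FileSystem.get(URI.create("hdfs://localhost:34853"), new Configuration());
    Path wal = new Path("/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/"
        + "0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880");
    // Ask the NameNode to recover the lease, then poll until the file is closed.
    // If the DistributedFileSystem has already been closed, both calls throw
    // IOException("Filesystem closed"), which is exactly the failure logged above.
    boolean recovered = dfs.recoverLease(wal);
    while (!recovered && !dfs.isFileClosed(wal)) {
      Thread.sleep(1000L); // assumption: fixed pause; the HBase utility uses its own timeouts/backoff
      recovered = dfs.recoverLease(wal);
    }
  }
}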
2024-12-08T00:49:55,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:55,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta after 68099ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor191.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:49:56,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:49:56,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:57,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:57,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:49:58,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:58,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:49:59,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:49:59,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:00,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:00,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:50:01,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:01,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:02,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:50:02,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:02,658 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-08T00:50:03,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:03,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-12-08T00:50:04,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-12-08T00:50:04,187 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-08T00:50:04,194 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C35097%2C1733618972831.1733619004194
2024-12-08T00:50:04,201 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-08T00:50:04,202 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-08T00:50:04,202 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-08T00:50:04,202 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-08T00:50:04,202 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-08T00:50:04,202 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/WALs/0f983e3e5be1,35097,1733618972831/0f983e3e5be1%2C35097%2C1733618972831.1733618973332 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/WALs/0f983e3e5be1,35097,1733618972831/0f983e3e5be1%2C35097%2C1733618972831.1733619004194
2024-12-08T00:50:04,203 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45331:45331),(127.0.0.1/127.0.0.1:46007:46007)]
2024-12-08T00:50:04,203 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/WALs/0f983e3e5be1,35097,1733618972831/0f983e3e5be1%2C35097%2C1733618972831.1733618973332 is not closed yet, will try archiving it next time
2024-12-08T00:50:04,205 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-08T00:50:04,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741833_1009 (size=5546)
2024-12-08T00:50:04,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741833_1009 (size=5546)
2024-12-08T00:50:04,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-08T00:50:04,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-12-08T00:50:04,208 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-08T00:50:04,210 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-08T00:50:04,210 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-08T00:50:04,364 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35097 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-12-08T00:50:04,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222.
2024-12-08T00:50:04,365 INFO [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 662bfac0cae17973cd76f23e3d607222 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-08T00:50:04,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/.tmp/info/047f74ffad3441c48c4cfa645e64602a is 1080, key is row0003/info:/1733619004190/Put/seqid=0
2024-12-08T00:50:04,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741840_1016 (size=6033)
2024-12-08T00:50:04,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741840_1016 (size=6033)
2024-12-08T00:50:04,377 INFO [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/.tmp/info/047f74ffad3441c48c4cfa645e64602a
2024-12-08T00:50:04,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/.tmp/info/047f74ffad3441c48c4cfa645e64602a as hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/info/047f74ffad3441c48c4cfa645e64602a
2024-12-08T00:50:04,387 INFO [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/info/047f74ffad3441c48c4cfa645e64602a, entries=1, sequenceid=13, filesize=5.9 K
2024-12-08T00:50:04,388 INFO [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 662bfac0cae17973cd76f23e3d607222 in 23ms, sequenceid=13, compaction requested=true
2024-12-08T00:50:04,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 662bfac0cae17973cd76f23e3d607222:
2024-12-08T00:50:04,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222.
2024-12-08T00:50:04,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-12-08T00:50:04,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-12-08T00:50:04,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-12-08T00:50:04,392 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-12-08T00:50:04,392 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 180 msec
2024-12-08T00:50:04,394 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 188 msec
2024-12-08T00:50:04,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-12-08T00:50:05,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:05,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:06,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:50:06,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:07,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:07,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:50:08,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:08,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:09,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:50:09,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:10,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:10,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:50:11,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:11,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:12,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:50:12,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:13,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:13,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:13,919 INFO [master/0f983e3e5be1:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 
2024-12-08T00:50:13,919 INFO [master/0f983e3e5be1:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-08T00:50:14,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-12-08T00:50:14,287 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-08T00:50:14,287 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:50:14,291 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:50:14,292 DEBUG [Time-limited test {}] regionserver.HStore(1541): 662bfac0cae17973cd76f23e3d607222/info is initiating minor compaction (all files) 2024-12-08T00:50:14,292 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T00:50:14,292 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:50:14,292 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 662bfac0cae17973cd76f23e3d607222/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222. 
2024-12-08T00:50:14,293 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/info/334d03fa90ea44eb84d7884a5ada2efb, hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/info/95c6ac8595654e05864ac7f681ee7e26, hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/info/047f74ffad3441c48c4cfa645e64602a] into tmpdir=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/.tmp, totalSize=17.7 K 2024-12-08T00:50:14,294 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 334d03fa90ea44eb84d7884a5ada2efb, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733618984017 2024-12-08T00:50:14,296 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 95c6ac8595654e05864ac7f681ee7e26, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1733618994109 2024-12-08T00:50:14,296 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 047f74ffad3441c48c4cfa645e64602a, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733619004190 2024-12-08T00:50:14,308 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 662bfac0cae17973cd76f23e3d607222#info#compaction#45 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:50:14,309 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/.tmp/info/388be1f29a83432782c99b1f5785c998 is 1080, key is row0001/info:/1733618984017/Put/seqid=0 2024-12-08T00:50:14,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741841_1017 (size=8296) 2024-12-08T00:50:14,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741841_1017 (size=8296) 2024-12-08T00:50:14,321 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/.tmp/info/388be1f29a83432782c99b1f5785c998 as hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/info/388be1f29a83432782c99b1f5785c998 2024-12-08T00:50:14,329 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 662bfac0cae17973cd76f23e3d607222/info of 662bfac0cae17973cd76f23e3d607222 into 388be1f29a83432782c99b1f5785c998(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:50:14,329 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 662bfac0cae17973cd76f23e3d607222: 2024-12-08T00:50:14,331 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C35097%2C1733618972831.1733619014331 2024-12-08T00:50:14,337 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:50:14,337 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:50:14,337 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:50:14,337 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:50:14,338 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:50:14,338 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/WALs/0f983e3e5be1,35097,1733618972831/0f983e3e5be1%2C35097%2C1733618972831.1733619004194 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/WALs/0f983e3e5be1,35097,1733618972831/0f983e3e5be1%2C35097%2C1733618972831.1733619014331 2024-12-08T00:50:14,338 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45331:45331),(127.0.0.1/127.0.0.1:46007:46007)] 2024-12-08T00:50:14,338 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/WALs/0f983e3e5be1,35097,1733618972831/0f983e3e5be1%2C35097%2C1733618972831.1733619004194 is not closed yet, will try archiving it next time 2024-12-08T00:50:14,339 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/WALs/0f983e3e5be1,35097,1733618972831/0f983e3e5be1%2C35097%2C1733618972831.1733618973332 to hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/oldWALs/0f983e3e5be1%2C35097%2C1733618972831.1733618973332 2024-12-08T00:50:14,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741839_1015 (size=2520) 2024-12-08T00:50:14,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741839_1015 (size=2520) 2024-12-08T00:50:14,340 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-08T00:50:14,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-08T00:50:14,342 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:50:14,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-08T00:50:14,343 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:50:14,343 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:50:14,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:14,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:50:14,496 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35097 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-12-08T00:50:14,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222. 2024-12-08T00:50:14,497 INFO [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 662bfac0cae17973cd76f23e3d607222 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-08T00:50:14,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/.tmp/info/3db6198fa21a4b8cbeac8a0772914a7d is 1080, key is row0000/info:/1733619014330/Put/seqid=0 2024-12-08T00:50:14,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741843_1019 (size=6033) 2024-12-08T00:50:14,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741843_1019 (size=6033) 2024-12-08T00:50:14,514 INFO [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/.tmp/info/3db6198fa21a4b8cbeac8a0772914a7d 2024-12-08T00:50:14,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/.tmp/info/3db6198fa21a4b8cbeac8a0772914a7d as hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/info/3db6198fa21a4b8cbeac8a0772914a7d 2024-12-08T00:50:14,525 INFO [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/info/3db6198fa21a4b8cbeac8a0772914a7d, entries=1, sequenceid=18, filesize=5.9 K 2024-12-08T00:50:14,526 INFO [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 662bfac0cae17973cd76f23e3d607222 in 29ms, sequenceid=18, compaction requested=false 2024-12-08T00:50:14,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): 
Flush status journal for 662bfac0cae17973cd76f23e3d607222: 2024-12-08T00:50:14,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222. 2024-12-08T00:50:14,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-12-08T00:50:14,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-12-08T00:50:14,530 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-12-08T00:50:14,530 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 185 msec 2024-12-08T00:50:14,533 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 191 msec 2024-12-08T00:50:15,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:15,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:16,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:16,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:17,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:17,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:18,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:18,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:19,333 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 662bfac0cae17973cd76f23e3d607222, had cached 0 bytes from a total of 14329 2024-12-08T00:50:19,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:19,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:20,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:20,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:21,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:21,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:22,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:22,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:23,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:23,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:24,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-08T00:50:24,357 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-08T00:50:24,365 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C35097%2C1733618972831.1733619024365 2024-12-08T00:50:24,372 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:50:24,372 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:50:24,372 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:50:24,372 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:50:24,372 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:50:24,372 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/WALs/0f983e3e5be1,35097,1733618972831/0f983e3e5be1%2C35097%2C1733618972831.1733619014331 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/WALs/0f983e3e5be1,35097,1733618972831/0f983e3e5be1%2C35097%2C1733618972831.1733619024365 2024-12-08T00:50:24,373 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45331:45331),(127.0.0.1/127.0.0.1:46007:46007)] 2024-12-08T00:50:24,373 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/WALs/0f983e3e5be1,35097,1733618972831/0f983e3e5be1%2C35097%2C1733618972831.1733619014331 is not closed 
yet, will try archiving it next time 2024-12-08T00:50:24,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T00:50:24,373 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/WALs/0f983e3e5be1,35097,1733618972831/0f983e3e5be1%2C35097%2C1733618972831.1733619004194 to hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/oldWALs/0f983e3e5be1%2C35097%2C1733618972831.1733619004194 2024-12-08T00:50:24,373 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T00:50:24,373 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:50:24,374 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:50:24,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741842_1018 (size=2026) 2024-12-08T00:50:24,375 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:50:24,375 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-08T00:50:24,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741842_1018 (size=2026) 2024-12-08T00:50:24,375 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T00:50:24,375 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1167285988, stopped=false 2024-12-08T00:50:24,375 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0f983e3e5be1,42913,1733618972678 2024-12-08T00:50:24,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:24,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T00:50:24,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35097-0x10002f3f6900001, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T00:50:24,423 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T00:50:24,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:50:24,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35097-0x10002f3f6900001, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:50:24,424 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T00:50:24,424 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at 
org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:50:24,424 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:50:24,425 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:50:24,425 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35097-0x10002f3f6900001, quorum=127.0.0.1:62505, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:50:24,425 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0f983e3e5be1,35097,1733618972831' ***** 2024-12-08T00:50:24,425 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T00:50:24,425 INFO [RS:0;0f983e3e5be1:35097 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T00:50:24,425 INFO [RS:0;0f983e3e5be1:35097 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T00:50:24,426 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T00:50:24,426 INFO [RS:0;0f983e3e5be1:35097 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T00:50:24,426 INFO [RS:0;0f983e3e5be1:35097 {}] regionserver.HRegionServer(3091): Received CLOSE for 662bfac0cae17973cd76f23e3d607222 2024-12-08T00:50:24,426 INFO [RS:0;0f983e3e5be1:35097 {}] regionserver.HRegionServer(959): stopping server 0f983e3e5be1,35097,1733618972831 2024-12-08T00:50:24,426 INFO [RS:0;0f983e3e5be1:35097 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T00:50:24,426 INFO [RS:0;0f983e3e5be1:35097 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0f983e3e5be1:35097. 
2024-12-08T00:50:24,426 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 662bfac0cae17973cd76f23e3d607222, disabling compactions & flushes 2024-12-08T00:50:24,427 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222. 2024-12-08T00:50:24,427 DEBUG [RS:0;0f983e3e5be1:35097 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:50:24,427 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222. 2024-12-08T00:50:24,427 DEBUG [RS:0;0f983e3e5be1:35097 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:50:24,427 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222. after waiting 0 ms 2024-12-08T00:50:24,427 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222. 2024-12-08T00:50:24,427 INFO [RS:0;0f983e3e5be1:35097 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T00:50:24,427 INFO [RS:0;0f983e3e5be1:35097 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T00:50:24,427 INFO [RS:0;0f983e3e5be1:35097 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-08T00:50:24,427 INFO [RS:0;0f983e3e5be1:35097 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T00:50:24,427 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 662bfac0cae17973cd76f23e3d607222 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-08T00:50:24,427 INFO [RS:0;0f983e3e5be1:35097 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-08T00:50:24,428 DEBUG [RS:0;0f983e3e5be1:35097 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 662bfac0cae17973cd76f23e3d607222=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222.} 2024-12-08T00:50:24,428 DEBUG [RS:0;0f983e3e5be1:35097 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 662bfac0cae17973cd76f23e3d607222 2024-12-08T00:50:24,428 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T00:50:24,428 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T00:50:24,428 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T00:50:24,428 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T00:50:24,428 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T00:50:24,428 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-12-08T00:50:24,433 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/.tmp/info/143196c15f8942b0af26ab4850558fba is 1080, key is row0001/info:/1733619024360/Put/seqid=0 2024-12-08T00:50:24,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741845_1021 (size=6033) 2024-12-08T00:50:24,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741845_1021 (size=6033) 2024-12-08T00:50:24,438 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/.tmp/info/143196c15f8942b0af26ab4850558fba 2024-12-08T00:50:24,443 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/.tmp/info/143196c15f8942b0af26ab4850558fba as hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/info/143196c15f8942b0af26ab4850558fba 2024-12-08T00:50:24,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:50:24,446 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/hbase/meta/1588230740/.tmp/info/34ea5aa2585748259a54f613a1088da6 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222./info:regioninfo/1733618974349/Put/seqid=0 2024-12-08T00:50:24,449 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/info/143196c15f8942b0af26ab4850558fba, entries=1, sequenceid=22, filesize=5.9 K 2024-12-08T00:50:24,450 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 662bfac0cae17973cd76f23e3d607222 in 22ms, sequenceid=22, compaction requested=true 2024-12-08T00:50:24,450 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/info/334d03fa90ea44eb84d7884a5ada2efb, hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/info/95c6ac8595654e05864ac7f681ee7e26, hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/info/047f74ffad3441c48c4cfa645e64602a] to archive 2024-12-08T00:50:24,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741846_1022 (size=7308) 2024-12-08T00:50:24,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741846_1022 (size=7308) 2024-12-08T00:50:24,451 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-08T00:50:24,451 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/hbase/meta/1588230740/.tmp/info/34ea5aa2585748259a54f613a1088da6 2024-12-08T00:50:24,453 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/info/334d03fa90ea44eb84d7884a5ada2efb to hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/info/334d03fa90ea44eb84d7884a5ada2efb 2024-12-08T00:50:24,454 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/info/95c6ac8595654e05864ac7f681ee7e26 to hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/info/95c6ac8595654e05864ac7f681ee7e26 2024-12-08T00:50:24,455 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/info/047f74ffad3441c48c4cfa645e64602a to hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/info/047f74ffad3441c48c4cfa645e64602a 2024-12-08T00:50:24,456 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=0f983e3e5be1:42913 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-12-08T00:50:24,456 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [334d03fa90ea44eb84d7884a5ada2efb=6033, 95c6ac8595654e05864ac7f681ee7e26=6033, 047f74ffad3441c48c4cfa645e64602a=6033] 2024-12-08T00:50:24,459 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/662bfac0cae17973cd76f23e3d607222/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-12-08T00:50:24,459 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222. 2024-12-08T00:50:24,459 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 662bfac0cae17973cd76f23e3d607222: Waiting for close lock at 1733619024426Running coprocessor pre-close hooks at 1733619024426Disabling compacts and flushes for region at 1733619024426Disabling writes for close at 1733619024427 (+1 ms)Obtaining lock to block concurrent updates at 1733619024427Preparing flush snapshotting stores in 662bfac0cae17973cd76f23e3d607222 at 1733619024427Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1733619024428 (+1 ms)Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222. at 1733619024429 (+1 ms)Flushing 662bfac0cae17973cd76f23e3d607222/info: creating writer at 1733619024429Flushing 662bfac0cae17973cd76f23e3d607222/info: appending metadata at 1733619024432 (+3 ms)Flushing 662bfac0cae17973cd76f23e3d607222/info: closing flushed file at 1733619024433 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5937f190: reopening flushed file at 1733619024442 (+9 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 662bfac0cae17973cd76f23e3d607222 in 22ms, sequenceid=22, compaction requested=true at 1733619024450 (+8 ms)Writing region close event to WAL at 1733619024456 (+6 ms)Running coprocessor post-close hooks at 1733619024459 (+3 ms)Closed at 1733619024459 2024-12-08T00:50:24,460 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733618973971.662bfac0cae17973cd76f23e3d607222. 
2024-12-08T00:50:24,468 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/hbase/meta/1588230740/.tmp/ns/d6685af342d84ca492558b741d9e5333 is 43, key is default/ns:d/1733618973900/Put/seqid=0 2024-12-08T00:50:24,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741847_1023 (size=5153) 2024-12-08T00:50:24,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741847_1023 (size=5153) 2024-12-08T00:50:24,473 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/hbase/meta/1588230740/.tmp/ns/d6685af342d84ca492558b741d9e5333 2024-12-08T00:50:24,490 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/hbase/meta/1588230740/.tmp/table/8318bc353fdd471c98ff80cadba409d6 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1733618974357/Put/seqid=0 2024-12-08T00:50:24,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741848_1024 (size=5508) 2024-12-08T00:50:24,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741848_1024 (size=5508) 2024-12-08T00:50:24,494 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/hbase/meta/1588230740/.tmp/table/8318bc353fdd471c98ff80cadba409d6 2024-12-08T00:50:24,499 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/hbase/meta/1588230740/.tmp/info/34ea5aa2585748259a54f613a1088da6 as hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/hbase/meta/1588230740/info/34ea5aa2585748259a54f613a1088da6 2024-12-08T00:50:24,505 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/hbase/meta/1588230740/info/34ea5aa2585748259a54f613a1088da6, entries=10, sequenceid=11, filesize=7.1 K 2024-12-08T00:50:24,506 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/hbase/meta/1588230740/.tmp/ns/d6685af342d84ca492558b741d9e5333 as hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/hbase/meta/1588230740/ns/d6685af342d84ca492558b741d9e5333 2024-12-08T00:50:24,513 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/hbase/meta/1588230740/ns/d6685af342d84ca492558b741d9e5333, entries=2, sequenceid=11, filesize=5.0 K 2024-12-08T00:50:24,514 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/hbase/meta/1588230740/.tmp/table/8318bc353fdd471c98ff80cadba409d6 as hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/hbase/meta/1588230740/table/8318bc353fdd471c98ff80cadba409d6 2024-12-08T00:50:24,519 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/hbase/meta/1588230740/table/8318bc353fdd471c98ff80cadba409d6, entries=2, sequenceid=11, filesize=5.4 K 2024-12-08T00:50:24,521 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 93ms, sequenceid=11, compaction requested=false 2024-12-08T00:50:24,525 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-08T00:50:24,526 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T00:50:24,526 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T00:50:24,526 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733619024428Running coprocessor pre-close hooks at 1733619024428Disabling compacts and flushes for region at 1733619024428Disabling writes for close at 1733619024428Obtaining lock to block concurrent updates at 1733619024428Preparing flush snapshotting stores in 1588230740 at 1733619024428Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1733619024429 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733619024430 (+1 ms)Flushing 1588230740/info: creating writer at 1733619024431 (+1 ms)Flushing 1588230740/info: appending metadata at 1733619024446 (+15 ms)Flushing 1588230740/info: closing flushed file at 1733619024446Flushing 1588230740/ns: creating writer at 1733619024456 (+10 ms)Flushing 1588230740/ns: appending metadata at 1733619024468 (+12 ms)Flushing 1588230740/ns: closing flushed file at 1733619024468Flushing 1588230740/table: creating writer at 1733619024477 (+9 ms)Flushing 1588230740/table: appending metadata at 1733619024489 (+12 ms)Flushing 1588230740/table: closing flushed file at 1733619024489Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7eab617: reopening flushed file at 1733619024498 (+9 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@44f8d4fa: reopening flushed file at 1733619024505 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@17b29866: reopening flushed file at 1733619024513 (+8 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 93ms, sequenceid=11, compaction requested=false at 1733619024521 (+8 ms)Writing region close event to WAL at 1733619024522 (+1 ms)Running coprocessor post-close hooks at 1733619024526 (+4 ms)Closed at 1733619024526 2024-12-08T00:50:24,526 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T00:50:24,628 INFO [RS:0;0f983e3e5be1:35097 {}] regionserver.HRegionServer(976): stopping server 0f983e3e5be1,35097,1733618972831; all regions closed. 2024-12-08T00:50:24,629 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:50:24,629 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:50:24,629 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:50:24,630 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:50:24,630 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:50:24,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741834_1010 (size=3306) 2024-12-08T00:50:24,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741834_1010 (size=3306) 2024-12-08T00:50:24,639 DEBUG [RS:0;0f983e3e5be1:35097 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/oldWALs 2024-12-08T00:50:24,639 INFO [RS:0;0f983e3e5be1:35097 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0f983e3e5be1%2C35097%2C1733618972831.meta:.meta(num 1733618973808) 2024-12-08T00:50:24,640 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:50:24,640 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:50:24,640 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:50:24,640 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:50:24,640 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:50:24,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741844_1020 (size=1252) 2024-12-08T00:50:24,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741844_1020 (size=1252) 2024-12-08T00:50:24,647 DEBUG [RS:0;0f983e3e5be1:35097 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/oldWALs 2024-12-08T00:50:24,647 INFO [RS:0;0f983e3e5be1:35097 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0f983e3e5be1%2C35097%2C1733618972831:(num 1733619024365) 2024-12-08T00:50:24,647 DEBUG [RS:0;0f983e3e5be1:35097 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:50:24,647 INFO [RS:0;0f983e3e5be1:35097 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:50:24,647 INFO [RS:0;0f983e3e5be1:35097 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T00:50:24,647 INFO [RS:0;0f983e3e5be1:35097 {}] hbase.ChoreService(370): Chore service for: regionserver/0f983e3e5be1:0 had 
[ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-08T00:50:24,647 INFO [RS:0;0f983e3e5be1:35097 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T00:50:24,648 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T00:50:24,648 INFO [RS:0;0f983e3e5be1:35097 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35097 2024-12-08T00:50:24,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T00:50:24,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35097-0x10002f3f6900001, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0f983e3e5be1,35097,1733618972831 2024-12-08T00:50:24,681 INFO [RS:0;0f983e3e5be1:35097 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T00:50:24,682 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0f983e3e5be1,35097,1733618972831] 2024-12-08T00:50:24,698 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0f983e3e5be1,35097,1733618972831 already deleted, retry=false 2024-12-08T00:50:24,698 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0f983e3e5be1,35097,1733618972831 expired; onlineServers=0 2024-12-08T00:50:24,698 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0f983e3e5be1,42913,1733618972678' ***** 2024-12-08T00:50:24,698 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T00:50:24,698 INFO [M:0;0f983e3e5be1:42913 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T00:50:24,698 INFO [M:0;0f983e3e5be1:42913 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T00:50:24,698 DEBUG [M:0;0f983e3e5be1:42913 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T00:50:24,698 DEBUG [M:0;0f983e3e5be1:42913 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T00:50:24,698 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-08T00:50:24,698 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733618973142 {}] cleaner.HFileCleaner(306): Exit Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733618973142,5,FailOnTimeoutGroup] 2024-12-08T00:50:24,698 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733618973142 {}] cleaner.HFileCleaner(306): Exit Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733618973142,5,FailOnTimeoutGroup] 2024-12-08T00:50:24,698 INFO [M:0;0f983e3e5be1:42913 {}] hbase.ChoreService(370): Chore service for: master/0f983e3e5be1:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T00:50:24,698 INFO [M:0;0f983e3e5be1:42913 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T00:50:24,698 DEBUG [M:0;0f983e3e5be1:42913 {}] master.HMaster(1795): Stopping service threads 2024-12-08T00:50:24,698 INFO [M:0;0f983e3e5be1:42913 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T00:50:24,698 INFO [M:0;0f983e3e5be1:42913 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T00:50:24,699 INFO [M:0;0f983e3e5be1:42913 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T00:50:24,699 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T00:50:24,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T00:50:24,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:50:24,706 DEBUG [M:0;0f983e3e5be1:42913 {}] zookeeper.ZKUtil(347): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T00:50:24,706 WARN [M:0;0f983e3e5be1:42913 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T00:50:24,706 INFO [M:0;0f983e3e5be1:42913 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/.lastflushedseqids 2024-12-08T00:50:24,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741849_1025 (size=130) 2024-12-08T00:50:24,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741849_1025 (size=130) 2024-12-08T00:50:24,713 INFO [M:0;0f983e3e5be1:42913 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T00:50:24,713 INFO [M:0;0f983e3e5be1:42913 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T00:50:24,713 DEBUG [M:0;0f983e3e5be1:42913 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T00:50:24,713 INFO [M:0;0f983e3e5be1:42913 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:50:24,713 DEBUG [M:0;0f983e3e5be1:42913 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:50:24,713 DEBUG [M:0;0f983e3e5be1:42913 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T00:50:24,713 DEBUG [M:0;0f983e3e5be1:42913 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:50:24,713 INFO [M:0;0f983e3e5be1:42913 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.57 KB heapSize=54.96 KB 2024-12-08T00:50:24,730 DEBUG [M:0;0f983e3e5be1:42913 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6b4fca6f080546518d87f541f86d5f8f is 82, key is hbase:meta,,1/info:regioninfo/1733618973839/Put/seqid=0 2024-12-08T00:50:24,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741850_1026 (size=5672) 2024-12-08T00:50:24,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741850_1026 (size=5672) 2024-12-08T00:50:24,735 INFO [M:0;0f983e3e5be1:42913 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6b4fca6f080546518d87f541f86d5f8f 2024-12-08T00:50:24,752 DEBUG [M:0;0f983e3e5be1:42913 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4d1f136adef542f4b7e955b550e68703 is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733618974361/Put/seqid=0 2024-12-08T00:50:24,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741851_1027 (size=7820) 2024-12-08T00:50:24,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741851_1027 (size=7820) 2024-12-08T00:50:24,757 INFO [M:0;0f983e3e5be1:42913 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.96 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4d1f136adef542f4b7e955b550e68703 2024-12-08T00:50:24,761 INFO [M:0;0f983e3e5be1:42913 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 4d1f136adef542f4b7e955b550e68703 2024-12-08T00:50:24,774 DEBUG [M:0;0f983e3e5be1:42913 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a2ff212cadcb4175aec5fd6d09b335c4 is 69, key is 0f983e3e5be1,35097,1733618972831/rs:state/1733618973186/Put/seqid=0 
2024-12-08T00:50:24,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741852_1028 (size=5156) 2024-12-08T00:50:24,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741852_1028 (size=5156) 2024-12-08T00:50:24,778 INFO [M:0;0f983e3e5be1:42913 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a2ff212cadcb4175aec5fd6d09b335c4 2024-12-08T00:50:24,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35097-0x10002f3f6900001, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:50:24,790 INFO [RS:0;0f983e3e5be1:35097 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T00:50:24,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35097-0x10002f3f6900001, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:50:24,790 INFO [RS:0;0f983e3e5be1:35097 {}] regionserver.HRegionServer(1031): Exiting; stopping=0f983e3e5be1,35097,1733618972831; zookeeper connection closed. 2024-12-08T00:50:24,790 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1b7d208d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1b7d208d 2024-12-08T00:50:24,790 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-08T00:50:24,795 DEBUG [M:0;0f983e3e5be1:42913 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/33591b7cadf9455b96b2eec1298fce28 is 52, key is load_balancer_on/state:d/1733618973966/Put/seqid=0 2024-12-08T00:50:24,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741853_1029 (size=5056) 2024-12-08T00:50:24,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741853_1029 (size=5056) 2024-12-08T00:50:24,799 INFO [M:0;0f983e3e5be1:42913 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/33591b7cadf9455b96b2eec1298fce28 2024-12-08T00:50:24,804 DEBUG [M:0;0f983e3e5be1:42913 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6b4fca6f080546518d87f541f86d5f8f as hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6b4fca6f080546518d87f541f86d5f8f 2024-12-08T00:50:24,809 INFO [M:0;0f983e3e5be1:42913 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6b4fca6f080546518d87f541f86d5f8f, entries=8, sequenceid=121, filesize=5.5 K 2024-12-08T00:50:24,810 DEBUG [M:0;0f983e3e5be1:42913 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4d1f136adef542f4b7e955b550e68703 as hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4d1f136adef542f4b7e955b550e68703 2024-12-08T00:50:24,814 INFO [M:0;0f983e3e5be1:42913 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 4d1f136adef542f4b7e955b550e68703 2024-12-08T00:50:24,814 INFO [M:0;0f983e3e5be1:42913 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4d1f136adef542f4b7e955b550e68703, entries=14, sequenceid=121, filesize=7.6 K 2024-12-08T00:50:24,815 DEBUG [M:0;0f983e3e5be1:42913 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a2ff212cadcb4175aec5fd6d09b335c4 as hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a2ff212cadcb4175aec5fd6d09b335c4 2024-12-08T00:50:24,820 INFO [M:0;0f983e3e5be1:42913 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a2ff212cadcb4175aec5fd6d09b335c4, entries=1, sequenceid=121, filesize=5.0 K 2024-12-08T00:50:24,822 DEBUG [M:0;0f983e3e5be1:42913 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/33591b7cadf9455b96b2eec1298fce28 as hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/33591b7cadf9455b96b2eec1298fce28 2024-12-08T00:50:24,828 INFO [M:0;0f983e3e5be1:42913 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/a3e9745c-b9a8-c247-f8e7-f329a4720df0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/33591b7cadf9455b96b2eec1298fce28, entries=1, sequenceid=121, filesize=4.9 K 2024-12-08T00:50:24,830 INFO [M:0;0f983e3e5be1:42913 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.57 KB/44611, heapSize ~54.90 KB/56216, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 116ms, sequenceid=121, compaction requested=false 2024-12-08T00:50:24,831 INFO [M:0;0f983e3e5be1:42913 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T00:50:24,831 DEBUG [M:0;0f983e3e5be1:42913 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733619024713Disabling compacts and flushes for region at 1733619024713Disabling writes for close at 1733619024713Obtaining lock to block concurrent updates at 1733619024713Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733619024713Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44611, getHeapSize=56216, getOffHeapSize=0, getCellsCount=140 at 1733619024713Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733619024714 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733619024714Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733619024730 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733619024730Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733619024739 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733619024752 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733619024752Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733619024762 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733619024774 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733619024774Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733619024782 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733619024795 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733619024795Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24e8207a: reopening flushed file at 1733619024803 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6c8c09bc: reopening flushed file at 1733619024809 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b52ee8d: reopening flushed file at 1733619024814 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f6d7b41: reopening flushed file at 1733619024821 (+7 ms)Finished flush of dataSize ~43.57 KB/44611, heapSize ~54.90 KB/56216, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 116ms, sequenceid=121, compaction requested=false at 1733619024830 (+9 ms)Writing region close event to WAL at 1733619024831 (+1 ms)Closed at 1733619024831 2024-12-08T00:50:24,831 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:50:24,831 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:50:24,831 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:50:24,832 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:50:24,832 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:50:24,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741830_1006 (size=53008) 2024-12-08T00:50:24,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43933 is added to blk_1073741830_1006 (size=53008) 2024-12-08T00:50:24,834 INFO [M:0;0f983e3e5be1:42913 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-08T00:50:24,834 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T00:50:24,834 INFO [M:0;0f983e3e5be1:42913 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42913 2024-12-08T00:50:24,834 INFO [M:0;0f983e3e5be1:42913 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T00:50:24,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:50:24,956 INFO [M:0;0f983e3e5be1:42913 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T00:50:24,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x10002f3f6900000, quorum=127.0.0.1:62505, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:50:24,960 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@433a8e25{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:50:24,960 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@570a7045{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:50:24,960 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:50:24,960 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2635e80{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:50:24,960 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b7ba33e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/hadoop.log.dir/,STOPPED} 2024-12-08T00:50:24,963 WARN [BP-1390618451-172.17.0.2-1733618971046 heartbeating to localhost/127.0.0.1:46567 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:50:24,963 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T00:50:24,963 WARN [BP-1390618451-172.17.0.2-1733618971046 heartbeating to localhost/127.0.0.1:46567 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1390618451-172.17.0.2-1733618971046 (Datanode Uuid 52f0eeca-55bd-4cb1-8775-ea5a074d580f) service to localhost/127.0.0.1:46567 2024-12-08T00:50:24,963 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:50:24,963 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/cluster_190049d3-13ea-7573-72c2-be467c979f0f/data/data3/current/BP-1390618451-172.17.0.2-1733618971046 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:50:24,964 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/cluster_190049d3-13ea-7573-72c2-be467c979f0f/data/data4/current/BP-1390618451-172.17.0.2-1733618971046 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:50:24,964 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:50:24,967 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@15cc44b6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:50:24,967 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@31ad985a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:50:24,967 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:50:24,967 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@33ab71f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:50:24,967 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@460100b8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/hadoop.log.dir/,STOPPED} 2024-12-08T00:50:24,969 WARN [BP-1390618451-172.17.0.2-1733618971046 heartbeating to localhost/127.0.0.1:46567 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:50:24,969 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T00:50:24,969 WARN [BP-1390618451-172.17.0.2-1733618971046 heartbeating to localhost/127.0.0.1:46567 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1390618451-172.17.0.2-1733618971046 (Datanode Uuid f974d580-cfef-48c3-a483-6ed378ef3415) service to localhost/127.0.0.1:46567 2024-12-08T00:50:24,969 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:50:24,970 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/cluster_190049d3-13ea-7573-72c2-be467c979f0f/data/data1/current/BP-1390618451-172.17.0.2-1733618971046 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:50:24,970 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/cluster_190049d3-13ea-7573-72c2-be467c979f0f/data/data2/current/BP-1390618451-172.17.0.2-1733618971046 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:50:24,970 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:50:24,976 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1aa3c43{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T00:50:24,977 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@504cda16{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:50:24,977 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:50:24,977 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ca05505{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:50:24,977 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3804bd01{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/hadoop.log.dir/,STOPPED} 2024-12-08T00:50:24,983 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T00:50:25,004 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T00:50:25,010 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=209 (was 182) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46567 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:46567 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46567 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46567 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46567 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/0f983e3e5be1:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:46567 from 
jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:46567 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:46567 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46567 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=42 (was 14) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=17287 (was 17325) 2024-12-08T00:50:25,017 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=209, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=42, ProcessCount=11, AvailableMemoryMB=17287 2024-12-08T00:50:25,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T00:50:25,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/hadoop.log.dir so I do NOT create it in target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006 2024-12-08T00:50:25,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b762e3f-9801-d913-5546-f2f7c758b88c/hadoop.tmp.dir so I do NOT create it in target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006 2024-12-08T00:50:25,017 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/cluster_0946dd5b-4961-eb56-9129-ebe145a0235c, deleteOnExit=true 2024-12-08T00:50:25,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-08T00:50:25,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/test.cache.data in system properties and HBase conf 2024-12-08T00:50:25,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T00:50:25,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/hadoop.log.dir in system properties and HBase conf 2024-12-08T00:50:25,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T00:50:25,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T00:50:25,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T00:50:25,018 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-08T00:50:25,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T00:50:25,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T00:50:25,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T00:50:25,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T00:50:25,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T00:50:25,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T00:50:25,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T00:50:25,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T00:50:25,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T00:50:25,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/nfs.dump.dir in system properties and HBase conf 2024-12-08T00:50:25,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/java.io.tmpdir in system properties and HBase conf 2024-12-08T00:50:25,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T00:50:25,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T00:50:25,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T00:50:25,030 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T00:50:25,205 INFO [regionserver/0f983e3e5be1:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:50:25,305 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:50:25,308 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:50:25,312 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:50:25,312 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:50:25,312 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T00:50:25,312 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:50:25,313 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@9538b4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:50:25,313 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@831cfa{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:50:25,402 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4355bcf7{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/java.io.tmpdir/jetty-localhost-38785-hadoop-hdfs-3_4_1-tests_jar-_-any-14232014226344971962/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T00:50:25,403 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@65f4ddad{HTTP/1.1, (http/1.1)}{localhost:38785} 2024-12-08T00:50:25,403 INFO [Time-limited test {}] server.Server(415): Started @244833ms 2024-12-08T00:50:25,412 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T00:50:25,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:25,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:25,639 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:50:25,641 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:50:25,641 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:50:25,641 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:50:25,641 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T00:50:25,642 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18b8d361{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:50:25,642 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18472eb7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:50:25,732 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2c08daf8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/java.io.tmpdir/jetty-localhost-42379-hadoop-hdfs-3_4_1-tests_jar-_-any-2790051950656966433/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:50:25,733 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1a78718c{HTTP/1.1, (http/1.1)}{localhost:42379} 2024-12-08T00:50:25,733 INFO [Time-limited test {}] server.Server(415): Started @245163ms 2024-12-08T00:50:25,734 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T00:50:25,757 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:50:25,759 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:50:25,760 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:50:25,760 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:50:25,760 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T00:50:25,761 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@58c8e0ef{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:50:25,761 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49e6a30e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:50:25,850 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d46424b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/java.io.tmpdir/jetty-localhost-37827-hadoop-hdfs-3_4_1-tests_jar-_-any-1097575720110082404/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:50:25,851 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6ef289a6{HTTP/1.1, (http/1.1)}{localhost:37827} 2024-12-08T00:50:25,851 INFO [Time-limited test {}] server.Server(415): Started @245281ms 2024-12-08T00:50:25,852 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
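The HBaseTestingUtil and Jetty messages above are the in-process HDFS web servers coming up for the test. As a hedged illustration only (the test class itself never appears in this log, and the constructor and the startMiniCluster/shutdownMiniCluster/getConnection calls below are assumed from the HBaseTestingUtil API named in the log rather than quoted from it), the kind of test setup that produces this startup logging is roughly:

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        // Assumed usage: HBaseTestingUtil is the class logging the
        // "Setting ... in system properties and HBase conf" lines above; it
        // points every data/log directory under target/test-data first.
        HBaseTestingUtil util = new HBaseTestingUtil();
        try {
          // Brings up in-process ZooKeeper, NameNode, DataNodes, an HMaster
          // and a RegionServer, which is what emits the Jetty "Started @..."
          // and DataNode block-report lines seen in this log.
          util.startMiniCluster();
          // ... test logic would run here against util.getConnection() ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }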
2024-12-08T00:50:25,911 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T00:50:25,912 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-08T00:50:25,912 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-08T00:50:25,912 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-08T00:50:26,401 WARN [Thread-1959 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/cluster_0946dd5b-4961-eb56-9129-ebe145a0235c/data/data1/current/BP-1938729576-172.17.0.2-1733619025033/current, will proceed with Du for space computation calculation, 2024-12-08T00:50:26,401 WARN [Thread-1960 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/cluster_0946dd5b-4961-eb56-9129-ebe145a0235c/data/data2/current/BP-1938729576-172.17.0.2-1733619025033/current, will proceed with Du for space computation calculation, 2024-12-08T00:50:26,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:26,426 WARN [Thread-1923 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T00:50:26,428 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbe35d07bfe4e7b6 with lease ID 0x85f035e09c06cc4e: Processing first storage report for DS-5ea6de33-47eb-4d92-9742-2bbb76c1fd8e from datanode DatanodeRegistration(127.0.0.1:39927, datanodeUuid=456eafa8-bfeb-40a9-918d-47a178e6e0a5, infoPort=34617, infoSecurePort=0, ipcPort=42681, storageInfo=lv=-57;cid=testClusterID;nsid=958990184;c=1733619025033) 2024-12-08T00:50:26,428 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbe35d07bfe4e7b6 with lease ID 0x85f035e09c06cc4e: from storage DS-5ea6de33-47eb-4d92-9742-2bbb76c1fd8e node DatanodeRegistration(127.0.0.1:39927, datanodeUuid=456eafa8-bfeb-40a9-918d-47a178e6e0a5, infoPort=34617, infoSecurePort=0, ipcPort=42681, storageInfo=lv=-57;cid=testClusterID;nsid=958990184;c=1733619025033), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:50:26,428 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbe35d07bfe4e7b6 with lease ID 0x85f035e09c06cc4e: Processing first storage report for DS-9836d5be-9f32-4cc7-a267-809ddbb08cb6 from datanode DatanodeRegistration(127.0.0.1:39927, datanodeUuid=456eafa8-bfeb-40a9-918d-47a178e6e0a5, infoPort=34617, infoSecurePort=0, ipcPort=42681, storageInfo=lv=-57;cid=testClusterID;nsid=958990184;c=1733619025033) 2024-12-08T00:50:26,428 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbe35d07bfe4e7b6 with lease ID 0x85f035e09c06cc4e: from storage DS-9836d5be-9f32-4cc7-a267-809ddbb08cb6 node DatanodeRegistration(127.0.0.1:39927, datanodeUuid=456eafa8-bfeb-40a9-918d-47a178e6e0a5, infoPort=34617, infoSecurePort=0, ipcPort=42681, storageInfo=lv=-57;cid=testClusterID;nsid=958990184;c=1733619025033), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:50:26,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:26,552 WARN [Thread-1970 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/cluster_0946dd5b-4961-eb56-9129-ebe145a0235c/data/data3/current/BP-1938729576-172.17.0.2-1733619025033/current, will proceed with Du for space computation calculation, 2024-12-08T00:50:26,552 WARN [Thread-1971 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/cluster_0946dd5b-4961-eb56-9129-ebe145a0235c/data/data4/current/BP-1938729576-172.17.0.2-1733619025033/current, will proceed with Du for space computation calculation, 2024-12-08T00:50:26,570 WARN [Thread-1946 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T00:50:26,572 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe4218e1e962091ab with lease ID 0x85f035e09c06cc4f: Processing first storage report for DS-28b2f9af-886a-430b-8d47-e2db54a04c62 from datanode DatanodeRegistration(127.0.0.1:33409, datanodeUuid=42e28748-3dcb-4583-b257-0a9e28be7ac4, infoPort=46357, infoSecurePort=0, ipcPort=46667, storageInfo=lv=-57;cid=testClusterID;nsid=958990184;c=1733619025033) 2024-12-08T00:50:26,572 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe4218e1e962091ab with lease ID 0x85f035e09c06cc4f: from storage DS-28b2f9af-886a-430b-8d47-e2db54a04c62 node DatanodeRegistration(127.0.0.1:33409, datanodeUuid=42e28748-3dcb-4583-b257-0a9e28be7ac4, infoPort=46357, infoSecurePort=0, ipcPort=46667, storageInfo=lv=-57;cid=testClusterID;nsid=958990184;c=1733619025033), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:50:26,572 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe4218e1e962091ab with lease ID 0x85f035e09c06cc4f: Processing first storage report for DS-a4113f0e-8347-41a8-b0fb-50e7e291d58f from datanode DatanodeRegistration(127.0.0.1:33409, datanodeUuid=42e28748-3dcb-4583-b257-0a9e28be7ac4, infoPort=46357, infoSecurePort=0, ipcPort=46667, storageInfo=lv=-57;cid=testClusterID;nsid=958990184;c=1733619025033) 2024-12-08T00:50:26,572 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe4218e1e962091ab with lease ID 0x85f035e09c06cc4f: from storage DS-a4113f0e-8347-41a8-b0fb-50e7e291d58f node DatanodeRegistration(127.0.0.1:33409, datanodeUuid=42e28748-3dcb-4583-b257-0a9e28be7ac4, infoPort=46357, infoSecurePort=0, ipcPort=46667, storageInfo=lv=-57;cid=testClusterID;nsid=958990184;c=1733619025033), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:50:26,574 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006 2024-12-08T00:50:26,577 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/cluster_0946dd5b-4961-eb56-9129-ebe145a0235c/zookeeper_0, clientPort=61574, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/cluster_0946dd5b-4961-eb56-9129-ebe145a0235c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/cluster_0946dd5b-4961-eb56-9129-ebe145a0235c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T00:50:26,578 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61574 2024-12-08T00:50:26,578 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:50:26,579 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:50:26,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741825_1001 (size=7) 2024-12-08T00:50:26,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741825_1001 (size=7) 2024-12-08T00:50:26,589 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92 with version=8 2024-12-08T00:50:26,589 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/hbase-staging 2024-12-08T00:50:26,592 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0f983e3e5be1:0 server-side Connection retries=45 2024-12-08T00:50:26,592 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:50:26,592 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T00:50:26,592 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T00:50:26,592 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:50:26,592 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T00:50:26,592 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T00:50:26,592 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T00:50:26,593 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40597 2024-12-08T00:50:26,594 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40597 connecting to ZooKeeper ensemble=127.0.0.1:61574 2024-12-08T00:50:26,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:405970x0, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T00:50:26,646 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40597-0x10002f4c92d0000 connected 2024-12-08T00:50:26,714 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:50:26,715 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:50:26,717 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:50:26,717 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92, hbase.cluster.distributed=false 2024-12-08T00:50:26,719 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T00:50:26,719 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40597 2024-12-08T00:50:26,719 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40597 2024-12-08T00:50:26,719 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40597 2024-12-08T00:50:26,720 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40597 2024-12-08T00:50:26,720 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40597 2024-12-08T00:50:26,732 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0f983e3e5be1:0 server-side Connection retries=45 2024-12-08T00:50:26,732 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:50:26,732 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T00:50:26,732 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T00:50:26,732 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:50:26,733 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T00:50:26,733 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T00:50:26,733 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T00:50:26,733 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45839 2024-12-08T00:50:26,734 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45839 connecting to ZooKeeper ensemble=127.0.0.1:61574 2024-12-08T00:50:26,734 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:50:26,736 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:50:26,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:458390x0, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T00:50:26,747 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45839-0x10002f4c92d0001 connected 2024-12-08T00:50:26,748 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45839-0x10002f4c92d0001, quorum=127.0.0.1:61574, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:50:26,748 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T00:50:26,748 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T00:50:26,749 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45839-0x10002f4c92d0001, quorum=127.0.0.1:61574, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T00:50:26,750 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45839-0x10002f4c92d0001, quorum=127.0.0.1:61574, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T00:50:26,750 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45839 2024-12-08T00:50:26,750 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45839 2024-12-08T00:50:26,750 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45839 2024-12-08T00:50:26,751 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45839 2024-12-08T00:50:26,751 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45839 2024-12-08T00:50:26,763 DEBUG [M:0;0f983e3e5be1:40597 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0f983e3e5be1:40597 2024-12-08T00:50:26,764 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0f983e3e5be1,40597,1733619026591 2024-12-08T00:50:26,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x10002f4c92d0001, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:50:26,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:50:26,773 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0f983e3e5be1,40597,1733619026591 2024-12-08T00:50:26,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x10002f4c92d0001, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T00:50:26,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:50:26,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x10002f4c92d0001, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:50:26,781 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T00:50:26,782 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0f983e3e5be1,40597,1733619026591 from backup master directory 2024-12-08T00:50:26,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0f983e3e5be1,40597,1733619026591 2024-12-08T00:50:26,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x10002f4c92d0001, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:50:26,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:50:26,789 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
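The ZKWatcher lines above are plain ZooKeeper watch notifications (NodeCreated, NodeDeleted, NodeChildrenChanged) delivered to the master and regionserver sessions on the ensemble at 127.0.0.1:61574 as the active-master znodes change. As a hedged sketch of that mechanism only (this client is illustrative and not part of the test; only the ensemble address and the /hbase/master path are taken from the log):

    import org.apache.zookeeper.ZooKeeper;

    public class MasterZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        // Connect to the mini ZooKeeper ensemble started for the test.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:61574", 30000,
            event -> System.out.println("event: " + event));
        // exists() with watch=true registers the same kind of watch whose
        // NodeCreated/NodeDeleted firings are logged above for /hbase/master.
        System.out.println("/hbase/master stat: " + zk.exists("/hbase/master", true));
        zk.close();
      }
    }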
2024-12-08T00:50:26,789 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0f983e3e5be1,40597,1733619026591 2024-12-08T00:50:26,795 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/hbase.id] with ID: 6dccec2d-1e61-4027-8b95-28a110aafc4d 2024-12-08T00:50:26,795 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/.tmp/hbase.id 2024-12-08T00:50:26,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741826_1002 (size=42) 2024-12-08T00:50:26,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741826_1002 (size=42) 2024-12-08T00:50:26,803 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/.tmp/hbase.id]:[hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/hbase.id] 2024-12-08T00:50:26,814 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:50:26,814 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-08T00:50:26,815 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
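The FSUtils lines above narrate the cluster ID being written to the temporary .tmp/hbase.id file and then moved onto hbase.id, i.e. the usual write-to-a-temp-path-then-rename pattern for publishing a small file on HDFS so readers never observe a half-written copy. A minimal sketch of that pattern with the stock Hadoop FileSystem API (the paths are placeholders and this is not the FSUtils implementation; the real hbase.id is written with HBase's own serialization, not the raw string used here):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WriteThenRenameSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder root directory; the log uses a test-data path under
        // hdfs://localhost:41911/user/jenkins/test-data/...
        Path root = new Path("hdfs://localhost:41911/tmp/example");
        FileSystem fs = root.getFileSystem(conf);
        Path tmp = new Path(root, ".tmp/hbase.id");
        Path dst = new Path(root, "hbase.id");
        // Step 1: write the content to the temporary location.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("6dccec2d-1e61-4027-8b95-28a110aafc4d".getBytes(StandardCharsets.UTF_8));
        }
        // Step 2: rename it to the final name in one metadata operation.
        if (!fs.rename(tmp, dst)) {
          throw new IOException("rename failed: " + tmp + " -> " + dst);
        }
      }
    }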
2024-12-08T00:50:26,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x10002f4c92d0001, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:50:26,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:50:26,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741827_1003 (size=196) 2024-12-08T00:50:26,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741827_1003 (size=196) 2024-12-08T00:50:26,833 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T00:50:26,834 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T00:50:26,834 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:50:26,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741828_1004 (size=1189) 2024-12-08T00:50:26,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741828_1004 (size=1189) 2024-12-08T00:50:26,843 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/data/master/store 2024-12-08T00:50:26,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741829_1005 (size=34) 2024-12-08T00:50:26,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741829_1005 (size=34) 2024-12-08T00:50:26,849 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:50:26,849 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T00:50:26,849 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:50:26,849 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:50:26,849 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T00:50:26,849 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:50:26,849 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
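The MasterRegion lines above print the complete descriptor of the local 'master:store' table, including an 'info' family with VERSIONS 3, ROW_INDEX_V1 block encoding, a ROWCOL bloom filter, IN_MEMORY true and an 8 KB block size. For orientation only, the same family attributes expressed through the public descriptor-builder API look roughly like the sketch below (the table name is illustrative and this is not how MasterRegion builds its descriptor internally):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static void main(String[] args) {
        // Mirrors the 'info' column family attributes dumped in the log.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build();
        // Illustrative table name; the real store table lives in the reserved
        // 'master' namespace and is managed internally by the HMaster.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_store"))
            .setColumnFamily(info)
            .build();
        System.out.println(td);
      }
    }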
2024-12-08T00:50:26,849 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733619026849Disabling compacts and flushes for region at 1733619026849Disabling writes for close at 1733619026849Writing region close event to WAL at 1733619026849Closed at 1733619026849 2024-12-08T00:50:26,850 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/data/master/store/.initializing 2024-12-08T00:50:26,850 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/WALs/0f983e3e5be1,40597,1733619026591 2024-12-08T00:50:26,852 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C40597%2C1733619026591, suffix=, logDir=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/WALs/0f983e3e5be1,40597,1733619026591, archiveDir=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/oldWALs, maxLogs=10 2024-12-08T00:50:26,853 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C40597%2C1733619026591.1733619026853 2024-12-08T00:50:26,857 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/WALs/0f983e3e5be1,40597,1733619026591/0f983e3e5be1%2C40597%2C1733619026591.1733619026853 2024-12-08T00:50:26,858 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34617:34617),(127.0.0.1/127.0.0.1:46357:46357)] 2024-12-08T00:50:26,858 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:50:26,858 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:50:26,858 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:50:26,858 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:50:26,862 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:50:26,863 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T00:50:26,863 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:50:26,864 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:50:26,864 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:50:26,864 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T00:50:26,864 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:50:26,865 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:50:26,865 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:50:26,866 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T00:50:26,866 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:50:26,866 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:50:26,866 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:50:26,867 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T00:50:26,867 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:50:26,867 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:50:26,868 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:50:26,868 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:50:26,868 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:50:26,869 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:50:26,870 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:50:26,870 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T00:50:26,871 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:50:26,877 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:50:26,877 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=821901, jitterRate=0.045101478695869446}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T00:50:26,877 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733619026859Initializing all the Stores at 1733619026859Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733619026859Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733619026862 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733619026862Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733619026862Cleaning up temporary data from old regions at 1733619026870 (+8 ms)Region opened successfully at 1733619026877 (+7 ms) 2024-12-08T00:50:26,878 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T00:50:26,882 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b5d751d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0f983e3e5be1/172.17.0.2:0 2024-12-08T00:50:26,882 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T00:50:26,883 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T00:50:26,883 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T00:50:26,883 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T00:50:26,883 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-08T00:50:26,883 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-08T00:50:26,883 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T00:50:26,886 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T00:50:26,887 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T00:50:26,914 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T00:50:26,914 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T00:50:26,915 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T00:50:26,922 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T00:50:26,922 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T00:50:26,923 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T00:50:26,930 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T00:50:26,931 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T00:50:26,939 DEBUG 
[master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T00:50:26,941 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T00:50:26,951 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T00:50:26,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x10002f4c92d0001, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T00:50:26,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x10002f4c92d0001, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:50:26,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T00:50:26,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:50:26,961 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0f983e3e5be1,40597,1733619026591, sessionid=0x10002f4c92d0000, setting cluster-up flag (Was=false) 2024-12-08T00:50:26,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x10002f4c92d0001, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:50:26,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:50:27,001 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T00:50:27,003 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0f983e3e5be1,40597,1733619026591 2024-12-08T00:50:27,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:50:27,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x10002f4c92d0001, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:50:27,047 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T00:50:27,048 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0f983e3e5be1,40597,1733619026591 2024-12-08T00:50:27,049 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T00:50:27,051 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T00:50:27,051 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T00:50:27,051 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-08T00:50:27,051 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0f983e3e5be1,40597,1733619026591 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T00:50:27,053 INFO [RS:0;0f983e3e5be1:45839 {}] regionserver.HRegionServer(746): ClusterId : 6dccec2d-1e61-4027-8b95-28a110aafc4d 2024-12-08T00:50:27,053 DEBUG [RS:0;0f983e3e5be1:45839 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T00:50:27,053 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:50:27,053 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:50:27,053 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:50:27,053 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:50:27,053 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0f983e3e5be1:0, corePoolSize=10, maxPoolSize=10 2024-12-08T00:50:27,053 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:50:27,053 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_MERGE_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=2, maxPoolSize=2 2024-12-08T00:50:27,053 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:50:27,053 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733619057053 2024-12-08T00:50:27,054 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T00:50:27,054 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T00:50:27,054 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T00:50:27,054 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T00:50:27,054 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T00:50:27,054 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T00:50:27,054 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T00:50:27,054 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T00:50:27,054 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T00:50:27,054 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T00:50:27,054 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:50:27,054 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T00:50:27,054 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T00:50:27,054 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-08T00:50:27,055 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:50:27,055 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T00:50:27,056 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733619027054,5,FailOnTimeoutGroup] 2024-12-08T00:50:27,057 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733619027056,5,FailOnTimeoutGroup] 2024-12-08T00:50:27,057 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T00:50:27,057 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T00:50:27,057 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T00:50:27,057 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
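
The FSTableDescriptors(156) entry above prints the hbase:meta column families in full (ROWCOL bloom filter, ROW_INDEX_V1 data block encoding, IN_MEMORY, 8 KB blocks). A minimal sketch of expressing the same attributes through the public TableDescriptorBuilder/ColumnFamilyDescriptorBuilder client API follows; the table name demo:meta_like and the 16 MB flush lower bound are illustrative assumptions, everything else is read off the descriptor printed above.

    // Sketch only: one column family with the attributes logged for hbase:meta's 'info'
    // family. demo:meta_like is a hypothetical table name; the flush lower bound value
    // (16 MB) is an illustrative choice, not taken from this test's configuration.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
      public static TableDescriptor build() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
            .build();
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo:meta_like"))
            .setColumnFamily(info)
            // table-descriptor property named by the FlushLargeStoresPolicy(65) messages
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024))
            .build();
      }
    }

This only makes the printed attributes concrete; it is not how FSTableDescriptors builds the descriptor internally. Setting the per-column-family flush lower bound on the descriptor is exactly what the FlushLargeStoresPolicy(65) entries report as missing, which is why the policy falls back to flush-size divided by the number of families.
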
2024-12-08T00:50:27,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741831_1007 (size=1321) 2024-12-08T00:50:27,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741831_1007 (size=1321) 2024-12-08T00:50:27,061 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T00:50:27,061 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92 2024-12-08T00:50:27,065 DEBUG [RS:0;0f983e3e5be1:45839 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T00:50:27,065 DEBUG [RS:0;0f983e3e5be1:45839 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T00:50:27,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741832_1008 (size=32) 2024-12-08T00:50:27,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741832_1008 (size=32) 2024-12-08T00:50:27,068 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:50:27,069 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for 
column family info of region 1588230740 2024-12-08T00:50:27,070 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T00:50:27,070 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:50:27,071 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:50:27,071 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T00:50:27,072 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T00:50:27,072 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:50:27,073 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:50:27,073 DEBUG [RS:0;0f983e3e5be1:45839 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T00:50:27,073 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T00:50:27,073 DEBUG [RS:0;0f983e3e5be1:45839 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b491c7a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0f983e3e5be1/172.17.0.2:0 2024-12-08T00:50:27,074 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T00:50:27,074 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:50:27,075 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:50:27,075 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T00:50:27,076 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T00:50:27,076 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:50:27,077 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:50:27,077 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T00:50:27,078 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/1588230740 2024-12-08T00:50:27,078 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/1588230740 2024-12-08T00:50:27,079 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 
1588230740 2024-12-08T00:50:27,079 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T00:50:27,080 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T00:50:27,081 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T00:50:27,083 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:50:27,084 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=832567, jitterRate=0.05866439640522003}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T00:50:27,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733619027068Initializing all the Stores at 1733619027069 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733619027069Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733619027069Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733619027069Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733619027069Cleaning up temporary data from old regions at 1733619027079 (+10 ms)Region opened successfully at 1733619027084 (+5 ms) 2024-12-08T00:50:27,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T00:50:27,084 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T00:50:27,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T00:50:27,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T00:50:27,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 
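
A quick check of the numbers in the "Opened 1588230740" entry above, assuming the split policy applies its jitter multiplicatively (final size = base * (1 + jitterRate)); the 786432-byte (768 KB) base is inferred from the log, not read from the test configuration.

    // Back-of-envelope check, not HBase code: with an assumed base max file size of
    // 786432 bytes, the logged jitterRate values reproduce the logged desiredMaxFileSize.
    public class SplitJitterCheck {
      public static void main(String[] args) {
        long base = 786_432L;                       // inferred, not read from hbase-site.xml
        double metaJitter = 0.05866439640522003;    // from the hbase:meta open entry above
        double masterJitter = 0.045101478695869446; // from the master:store entry earlier
        System.out.println(base + (long) (base * metaJitter));   // 832567, as logged
        System.out.println(base + (long) (base * masterJitter)); // 821901, as logged
      }
    }

Both logged pairs are consistent with that reading, which is why two region opens against the same base report different desiredMaxFileSize values.
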
2024-12-08T00:50:27,085 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T00:50:27,085 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733619027084Disabling compacts and flushes for region at 1733619027084Disabling writes for close at 1733619027084Writing region close event to WAL at 1733619027084Closed at 1733619027084 2024-12-08T00:50:27,086 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:50:27,086 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T00:50:27,086 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T00:50:27,087 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T00:50:27,087 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T00:50:27,088 DEBUG [RS:0;0f983e3e5be1:45839 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0f983e3e5be1:45839 2024-12-08T00:50:27,088 INFO [RS:0;0f983e3e5be1:45839 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T00:50:27,088 INFO [RS:0;0f983e3e5be1:45839 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T00:50:27,088 DEBUG [RS:0;0f983e3e5be1:45839 {}] regionserver.HRegionServer(832): About to register with Master. 
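
Several ZKUtil(444) entries in this startup report "Unable to get data of znode ... because node does not exist (not necessarily an error)": the master probes optional znodes such as /hbase/balancer before creating or defaulting them. A minimal stand-alone sketch of that probe with the plain ZooKeeper client, assuming the quorum address from the log and an arbitrary 30 s session timeout:

    // Probe an optional znode the way the ZKUtil(444) entries do: a missing node is
    // expected on first startup, not an error. Quorum and path are taken from the log;
    // the 30 s session timeout is an arbitrary example value.
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZnodeProbeSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:61574", 30_000, event -> { });
        Stat stat = zk.exists("/hbase/balancer", false);   // null when the node is absent
        if (stat == null) {
          System.out.println("/hbase/balancer not present yet (not necessarily an error)");
        } else {
          byte[] data = zk.getData("/hbase/balancer", false, stat);
          System.out.println("/hbase/balancer holds " + data.length + " bytes");
        }
        zk.close();
      }
    }

The RecoverableZooKeeper(212) "already deleted, retry=false" lines that follow each probe appear to be the same idea on the delete path: a NONODE answer on the first attempt is treated as success rather than retried.
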
2024-12-08T00:50:27,088 INFO [RS:0;0f983e3e5be1:45839 {}] regionserver.HRegionServer(2659): reportForDuty to master=0f983e3e5be1,40597,1733619026591 with port=45839, startcode=1733619026732 2024-12-08T00:50:27,088 DEBUG [RS:0;0f983e3e5be1:45839 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T00:50:27,090 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48625, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T00:50:27,090 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40597 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0f983e3e5be1,45839,1733619026732 2024-12-08T00:50:27,090 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40597 {}] master.ServerManager(517): Registering regionserver=0f983e3e5be1,45839,1733619026732 2024-12-08T00:50:27,092 DEBUG [RS:0;0f983e3e5be1:45839 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92 2024-12-08T00:50:27,092 DEBUG [RS:0;0f983e3e5be1:45839 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41911 2024-12-08T00:50:27,092 DEBUG [RS:0;0f983e3e5be1:45839 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T00:50:27,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T00:50:27,102 DEBUG [RS:0;0f983e3e5be1:45839 {}] zookeeper.ZKUtil(111): regionserver:45839-0x10002f4c92d0001, quorum=127.0.0.1:61574, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0f983e3e5be1,45839,1733619026732 2024-12-08T00:50:27,102 WARN [RS:0;0f983e3e5be1:45839 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T00:50:27,102 INFO [RS:0;0f983e3e5be1:45839 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:50:27,102 DEBUG [RS:0;0f983e3e5be1:45839 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/WALs/0f983e3e5be1,45839,1733619026732 2024-12-08T00:50:27,102 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0f983e3e5be1,45839,1733619026732] 2024-12-08T00:50:27,105 INFO [RS:0;0f983e3e5be1:45839 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T00:50:27,106 INFO [RS:0;0f983e3e5be1:45839 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T00:50:27,107 INFO [RS:0;0f983e3e5be1:45839 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T00:50:27,107 INFO [RS:0;0f983e3e5be1:45839 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
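
The reportForDuty / ServerManager(517) exchange above is where 0f983e3e5be1,45839,1733619026732 becomes a registered live server. A sketch of confirming that from a client, assuming the standard Admin/ClusterMetrics API and the ZooKeeper quorum printed in the log:

    // Sketch: after a region server reports for duty (as in the log above), a client can
    // confirm registration through the Admin API. Connection settings are illustrative.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListLiveServersSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");        // quorum host from the log
        conf.set("hbase.zookeeper.property.clientPort", "61574");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          System.out.println("active master: " + metrics.getMasterName());
          for (ServerName sn : metrics.getLiveServerMetrics().keySet()) {
            System.out.println("live region server: " + sn);    // e.g. 0f983e3e5be1,45839,...
          }
        }
      }
    }

getClusterMetrics() is roughly the public view of the same server list the master's RegionServerTracker maintains from the /hbase/rs ephemeral znodes seen in the entries above.
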
2024-12-08T00:50:27,107 INFO [RS:0;0f983e3e5be1:45839 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T00:50:27,108 INFO [RS:0;0f983e3e5be1:45839 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T00:50:27,108 INFO [RS:0;0f983e3e5be1:45839 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T00:50:27,108 DEBUG [RS:0;0f983e3e5be1:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:50:27,108 DEBUG [RS:0;0f983e3e5be1:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:50:27,108 DEBUG [RS:0;0f983e3e5be1:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:50:27,108 DEBUG [RS:0;0f983e3e5be1:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:50:27,108 DEBUG [RS:0;0f983e3e5be1:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:50:27,108 DEBUG [RS:0;0f983e3e5be1:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0f983e3e5be1:0, corePoolSize=2, maxPoolSize=2 2024-12-08T00:50:27,108 DEBUG [RS:0;0f983e3e5be1:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:50:27,108 DEBUG [RS:0;0f983e3e5be1:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:50:27,108 DEBUG [RS:0;0f983e3e5be1:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:50:27,108 DEBUG [RS:0;0f983e3e5be1:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:50:27,109 DEBUG [RS:0;0f983e3e5be1:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:50:27,109 DEBUG [RS:0;0f983e3e5be1:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:50:27,109 DEBUG [RS:0;0f983e3e5be1:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:50:27,109 DEBUG [RS:0;0f983e3e5be1:45839 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:50:27,109 INFO [RS:0;0f983e3e5be1:45839 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
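
The MemStoreFlusher(131) entry a little earlier reports globalMemStoreLimit=880 M with lowMark=836 M. A sketch relating those numbers to configuration, under two assumptions that should be checked rather than trusted: that the limit is the heap fraction set by hbase.regionserver.global.memstore.size, and that the low-water mark defaults to 95% of it (which matches 836/880 = 0.95 here).

    // Sketch only: the configuration key and the 0.95 lower-limit factor are recalled
    // defaults, not values read from this test's hbase-site.xml.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.40f); // fraction of RS heap
        long limitMb = 880;                        // value printed by the region server
        long lowMarkMb = (long) (limitMb * 0.95);  // assumed default lower-limit factor
        System.out.println(lowMarkMb);             // 836, as in the log
      }
    }
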
2024-12-08T00:50:27,109 INFO [RS:0;0f983e3e5be1:45839 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T00:50:27,109 INFO [RS:0;0f983e3e5be1:45839 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:50:27,109 INFO [RS:0;0f983e3e5be1:45839 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T00:50:27,109 INFO [RS:0;0f983e3e5be1:45839 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T00:50:27,109 INFO [RS:0;0f983e3e5be1:45839 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,45839,1733619026732-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T00:50:27,125 INFO [RS:0;0f983e3e5be1:45839 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T00:50:27,125 INFO [RS:0;0f983e3e5be1:45839 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,45839,1733619026732-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:50:27,125 INFO [RS:0;0f983e3e5be1:45839 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:50:27,125 INFO [RS:0;0f983e3e5be1:45839 {}] regionserver.Replication(171): 0f983e3e5be1,45839,1733619026732 started 2024-12-08T00:50:27,136 INFO [RS:0;0f983e3e5be1:45839 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:50:27,136 INFO [RS:0;0f983e3e5be1:45839 {}] regionserver.HRegionServer(1482): Serving as 0f983e3e5be1,45839,1733619026732, RpcServer on 0f983e3e5be1/172.17.0.2:45839, sessionid=0x10002f4c92d0001 2024-12-08T00:50:27,136 DEBUG [RS:0;0f983e3e5be1:45839 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T00:50:27,136 DEBUG [RS:0;0f983e3e5be1:45839 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0f983e3e5be1,45839,1733619026732 2024-12-08T00:50:27,136 DEBUG [RS:0;0f983e3e5be1:45839 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,45839,1733619026732' 2024-12-08T00:50:27,136 DEBUG [RS:0;0f983e3e5be1:45839 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T00:50:27,137 DEBUG [RS:0;0f983e3e5be1:45839 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T00:50:27,137 DEBUG [RS:0;0f983e3e5be1:45839 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T00:50:27,137 DEBUG [RS:0;0f983e3e5be1:45839 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T00:50:27,137 DEBUG [RS:0;0f983e3e5be1:45839 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0f983e3e5be1,45839,1733619026732 2024-12-08T00:50:27,138 DEBUG [RS:0;0f983e3e5be1:45839 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,45839,1733619026732' 2024-12-08T00:50:27,138 DEBUG [RS:0;0f983e3e5be1:45839 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T00:50:27,138 DEBUG 
[RS:0;0f983e3e5be1:45839 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T00:50:27,138 DEBUG [RS:0;0f983e3e5be1:45839 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T00:50:27,138 INFO [RS:0;0f983e3e5be1:45839 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T00:50:27,138 INFO [RS:0;0f983e3e5be1:45839 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T00:50:27,238 WARN [0f983e3e5be1:40597 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-08T00:50:27,241 INFO [RS:0;0f983e3e5be1:45839 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C45839%2C1733619026732, suffix=, logDir=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/WALs/0f983e3e5be1,45839,1733619026732, archiveDir=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/oldWALs, maxLogs=32 2024-12-08T00:50:27,242 INFO [RS:0;0f983e3e5be1:45839 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C45839%2C1733619026732.1733619027242 2024-12-08T00:50:27,252 INFO [RS:0;0f983e3e5be1:45839 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/WALs/0f983e3e5be1,45839,1733619026732/0f983e3e5be1%2C45839%2C1733619026732.1733619027242 2024-12-08T00:50:27,255 DEBUG [RS:0;0f983e3e5be1:45839 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46357:46357),(127.0.0.1/127.0.0.1:34617:34617)] 2024-12-08T00:50:27,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:27,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:50:27,488 DEBUG [0f983e3e5be1:40597 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-08T00:50:27,489 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0f983e3e5be1,45839,1733619026732 2024-12-08T00:50:27,493 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0f983e3e5be1,45839,1733619026732, state=OPENING 2024-12-08T00:50:27,518 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T00:50:27,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:50:27,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x10002f4c92d0001, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:50:27,528 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T00:50:27,529 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:50:27,529 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:50:27,529 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0f983e3e5be1,45839,1733619026732}] 2024-12-08T00:50:27,685 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T00:50:27,688 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38239, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T00:50:27,692 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-08T00:50:27,693 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:50:27,695 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C45839%2C1733619026732.meta, suffix=.meta, logDir=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/WALs/0f983e3e5be1,45839,1733619026732, archiveDir=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/oldWALs, maxLogs=32 2024-12-08T00:50:27,696 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C45839%2C1733619026732.meta.1733619027696.meta 2024-12-08T00:50:27,704 INFO 
[RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/WALs/0f983e3e5be1,45839,1733619026732/0f983e3e5be1%2C45839%2C1733619026732.meta.1733619027696.meta 2024-12-08T00:50:27,706 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46357:46357),(127.0.0.1/127.0.0.1:34617:34617)] 2024-12-08T00:50:27,707 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:50:27,708 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T00:50:27,708 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T00:50:27,708 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-08T00:50:27,708 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T00:50:27,708 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:50:27,708 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-08T00:50:27,708 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-08T00:50:27,709 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T00:50:27,710 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T00:50:27,710 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:50:27,710 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:50:27,710 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T00:50:27,711 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T00:50:27,711 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:50:27,711 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:50:27,711 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T00:50:27,712 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T00:50:27,712 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:50:27,712 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:50:27,712 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T00:50:27,713 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T00:50:27,713 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:50:27,713 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:50:27,713 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T00:50:27,714 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/1588230740 2024-12-08T00:50:27,715 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/1588230740 2024-12-08T00:50:27,716 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T00:50:27,716 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T00:50:27,716 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-08T00:50:27,717 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T00:50:27,718 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=776830, jitterRate=-0.012209683656692505}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T00:50:27,718 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-08T00:50:27,718 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733619027708Writing region info on filesystem at 1733619027708Initializing all the Stores at 1733619027709 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733619027709Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733619027709Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733619027709Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733619027709Cleaning up temporary data from old regions at 1733619027716 (+7 ms)Running coprocessor post-open hooks at 1733619027718 (+2 ms)Region opened successfully at 1733619027718 2024-12-08T00:50:27,719 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733619027685 2024-12-08T00:50:27,721 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T00:50:27,721 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-08T00:50:27,722 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=0f983e3e5be1,45839,1733619026732 2024-12-08T00:50:27,722 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0f983e3e5be1,45839,1733619026732, state=OPEN 2024-12-08T00:50:27,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x10002f4c92d0001, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T00:50:27,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T00:50:27,804 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0f983e3e5be1,45839,1733619026732 2024-12-08T00:50:27,804 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:50:27,804 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:50:27,809 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T00:50:27,809 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0f983e3e5be1,45839,1733619026732 in 275 msec 2024-12-08T00:50:27,814 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T00:50:27,814 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 723 msec 2024-12-08T00:50:27,815 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:50:27,815 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-08T00:50:27,817 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T00:50:27,817 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0f983e3e5be1,45839,1733619026732, seqNum=-1] 2024-12-08T00:50:27,817 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:50:27,818 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33859, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:50:27,823 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 773 msec 2024-12-08T00:50:27,823 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733619027823, completionTime=-1 2024-12-08T00:50:27,823 INFO 
[master/0f983e3e5be1:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-08T00:50:27,824 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-08T00:50:27,825 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-08T00:50:27,825 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733619087825 2024-12-08T00:50:27,825 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733619147825 2024-12-08T00:50:27,825 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-08T00:50:27,825 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,40597,1733619026591-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:50:27,826 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,40597,1733619026591-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:50:27,826 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,40597,1733619026591-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:50:27,826 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0f983e3e5be1:40597, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:50:27,826 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T00:50:27,826 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-08T00:50:27,827 DEBUG [master/0f983e3e5be1:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T00:50:27,829 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.040sec 2024-12-08T00:50:27,829 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T00:50:27,829 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T00:50:27,829 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T00:50:27,829 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-08T00:50:27,829 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T00:50:27,829 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,40597,1733619026591-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T00:50:27,829 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,40597,1733619026591-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T00:50:27,832 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T00:50:27,832 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T00:50:27,832 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,40597,1733619026591-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:50:27,853 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b49c57a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:50:27,853 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0f983e3e5be1,40597,-1 for getting cluster id 2024-12-08T00:50:27,854 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T00:50:27,855 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6dccec2d-1e61-4027-8b95-28a110aafc4d' 2024-12-08T00:50:27,855 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T00:50:27,855 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6dccec2d-1e61-4027-8b95-28a110aafc4d" 2024-12-08T00:50:27,855 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6fab5f12, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:50:27,855 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0f983e3e5be1,40597,-1] 2024-12-08T00:50:27,855 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T00:50:27,856 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:50:27,857 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56596, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T00:50:27,858 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72a0aae9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:50:27,858 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T00:50:27,859 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0f983e3e5be1,45839,1733619026732, seqNum=-1] 2024-12-08T00:50:27,859 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:50:27,861 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38846, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:50:27,862 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0f983e3e5be1,40597,1733619026591 2024-12-08T00:50:27,862 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:50:27,865 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-08T00:50:27,865 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-08T00:50:27,866 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 0f983e3e5be1,40597,1733619026591 2024-12-08T00:50:27,866 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1504dcab 2024-12-08T00:50:27,866 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T00:50:27,867 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56610, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T00:50:27,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40597 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-08T00:50:27,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40597 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-08T00:50:27,868 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40597 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T00:50:27,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40597 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-12-08T00:50:27,871 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T00:50:27,871 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:50:27,871 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40597 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-12-08T00:50:27,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40597 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T00:50:27,872 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T00:50:27,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741835_1011 (size=381) 2024-12-08T00:50:27,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741835_1011 (size=381) 2024-12-08T00:50:27,880 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => bcea31935b7ba636d16fda5ff1400227, NAME => 'TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92 2024-12-08T00:50:27,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741836_1012 (size=64) 2024-12-08T00:50:27,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741836_1012 (size=64) 2024-12-08T00:50:27,886 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:50:27,886 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing bcea31935b7ba636d16fda5ff1400227, disabling compactions & flushes 2024-12-08T00:50:27,886 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227. 2024-12-08T00:50:27,886 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227. 2024-12-08T00:50:27,886 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227. after waiting 0 ms 2024-12-08T00:50:27,886 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227. 2024-12-08T00:50:27,886 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227. 2024-12-08T00:50:27,886 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for bcea31935b7ba636d16fda5ff1400227: Waiting for close lock at 1733619027886Disabling compacts and flushes for region at 1733619027886Disabling writes for close at 1733619027886Writing region close event to WAL at 1733619027886Closed at 1733619027886 2024-12-08T00:50:27,887 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T00:50:27,888 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733619027887"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733619027887"}]},"ts":"1733619027887"} 2024-12-08T00:50:27,890 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-08T00:50:27,890 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T00:50:27,890 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733619027890"}]},"ts":"1733619027890"} 2024-12-08T00:50:27,892 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-12-08T00:50:27,893 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bcea31935b7ba636d16fda5ff1400227, ASSIGN}] 2024-12-08T00:50:27,894 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bcea31935b7ba636d16fda5ff1400227, ASSIGN 2024-12-08T00:50:27,895 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bcea31935b7ba636d16fda5ff1400227, ASSIGN; state=OFFLINE, location=0f983e3e5be1,45839,1733619026732; forceNewPlan=false, retain=false 2024-12-08T00:50:28,045 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=bcea31935b7ba636d16fda5ff1400227, regionState=OPENING, regionLocation=0f983e3e5be1,45839,1733619026732 2024-12-08T00:50:28,049 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bcea31935b7ba636d16fda5ff1400227, ASSIGN because future has completed 2024-12-08T00:50:28,050 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure bcea31935b7ba636d16fda5ff1400227, server=0f983e3e5be1,45839,1733619026732}] 2024-12-08T00:50:28,209 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227. 
2024-12-08T00:50:28,209 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => bcea31935b7ba636d16fda5ff1400227, NAME => 'TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227.', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:50:28,209 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:28,209 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:50:28,210 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:28,210 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:28,211 INFO [StoreOpener-bcea31935b7ba636d16fda5ff1400227-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:28,213 INFO [StoreOpener-bcea31935b7ba636d16fda5ff1400227-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bcea31935b7ba636d16fda5ff1400227 columnFamilyName info 2024-12-08T00:50:28,214 DEBUG [StoreOpener-bcea31935b7ba636d16fda5ff1400227-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:50:28,214 INFO [StoreOpener-bcea31935b7ba636d16fda5ff1400227-1 {}] regionserver.HStore(327): Store=bcea31935b7ba636d16fda5ff1400227/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:50:28,215 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:28,216 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:28,216 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:28,217 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:28,217 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:28,220 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:28,222 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:50:28,222 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened bcea31935b7ba636d16fda5ff1400227; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=722811, jitterRate=-0.08089926838874817}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T00:50:28,223 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:28,223 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for bcea31935b7ba636d16fda5ff1400227: Running coprocessor pre-open hook at 1733619028210Writing region info on filesystem at 1733619028210Initializing all the Stores at 1733619028211 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733619028211Cleaning up temporary data from old regions at 1733619028217 (+6 ms)Running coprocessor post-open hooks at 1733619028223 (+6 ms)Region opened successfully at 1733619028223 2024-12-08T00:50:28,224 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227., pid=6, masterSystemTime=1733619028204 2024-12-08T00:50:28,227 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227. 
2024-12-08T00:50:28,227 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227. 2024-12-08T00:50:28,228 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=bcea31935b7ba636d16fda5ff1400227, regionState=OPEN, openSeqNum=2, regionLocation=0f983e3e5be1,45839,1733619026732 2024-12-08T00:50:28,230 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure bcea31935b7ba636d16fda5ff1400227, server=0f983e3e5be1,45839,1733619026732 because future has completed 2024-12-08T00:50:28,235 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-08T00:50:28,235 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure bcea31935b7ba636d16fda5ff1400227, server=0f983e3e5be1,45839,1733619026732 in 182 msec 2024-12-08T00:50:28,239 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-08T00:50:28,239 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bcea31935b7ba636d16fda5ff1400227, ASSIGN in 342 msec 2024-12-08T00:50:28,240 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T00:50:28,240 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733619028240"}]},"ts":"1733619028240"} 2024-12-08T00:50:28,243 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-12-08T00:50:28,244 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T00:50:28,247 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 376 msec 2024-12-08T00:50:28,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:28,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:29,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:29,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:29,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:29,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:29,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:29,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:29,462 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:29,462 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:29,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:29,464 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:29,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:29,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:29,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:29,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:29,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:29,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:29,483 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:29,483 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:29,483 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:29,484 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:29,989 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T00:50:29,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:29,993 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:29,993 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:29,994 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:29,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:29,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:29,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:29,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:30,009 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:30,009 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:30,010 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:30,010 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:30,010 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:30,010 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:30,012 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:30,013 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:30,013 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:30,015 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:30,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:30,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-12-08T00:50:31,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880
2024-12-08T00:50:31,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta
2024-12-08T00:50:32,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880
2024-12-08T00:50:32,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta
2024-12-08T00:50:33,105 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-12-08T00:50:33,107 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling'
2024-12-08T00:50:33,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880
2024-12-08T00:50:33,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta
2024-12-08T00:50:34,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880
2024-12-08T00:50:34,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta
2024-12-08T00:50:35,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880
2024-12-08T00:50:35,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta
2024-12-08T00:50:35,911 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-08T00:50:35,912 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-12-08T00:50:35,913 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-08T00:50:35,913 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-12-08T00:50:35,914 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-12-08T00:50:35,914 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-12-08T00:50:35,915 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling
2024-12-08T00:50:35,915 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer
2024-12-08T00:50:36,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880
2024-12-08T00:50:36,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta
2024-12-08T00:50:37,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880
2024-12-08T00:50:37,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta
2024-12-08T00:50:37,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40597 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-08T00:50:37,967 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed
2024-12-08T00:50:37,967 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling,, for max=2147483647 with caching=100
2024-12-08T00:50:37,973 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling
2024-12-08T00:50:37,973 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227.
2024-12-08T00:50:37,977 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227., hostname=0f983e3e5be1,45839,1733619026732, seqNum=2]
2024-12-08T00:50:37,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45839 {}] regionserver.HRegion(8855): Flush requested on bcea31935b7ba636d16fda5ff1400227
2024-12-08T00:50:37,996 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bcea31935b7ba636d16fda5ff1400227 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-08T00:50:38,015 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/cd4e60d0bce144dc8aa7befd615e4091 is 1080, key is row0001/info:/1733619037979/Put/seqid=0
2024-12-08T00:50:38,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741837_1013 (size=12509)
2024-12-08T00:50:38,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741837_1013 (size=12509)
2024-12-08T00:50:38,020 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/cd4e60d0bce144dc8aa7befd615e4091
2024-12-08T00:50:38,027 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/cd4e60d0bce144dc8aa7befd615e4091 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/cd4e60d0bce144dc8aa7befd615e4091
2024-12-08T00:50:38,032 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/cd4e60d0bce144dc8aa7befd615e4091, entries=7, sequenceid=11, filesize=12.2 K
2024-12-08T00:50:38,033 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=17.86 KB/18292 for bcea31935b7ba636d16fda5ff1400227 in 37ms, sequenceid=11, compaction requested=false
2024-12-08T00:50:38,033 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bcea31935b7ba636d16fda5ff1400227:
2024-12-08T00:50:38,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45839 {}] regionserver.HRegion(8855): Flush requested on bcea31935b7ba636d16fda5ff1400227
2024-12-08T00:50:38,034 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bcea31935b7ba636d16fda5ff1400227 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB
2024-12-08T00:50:38,038 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/0b14ec84746b4311ad26c21ba706d30c is 1080, key is row0008/info:/1733619037998/Put/seqid=0
2024-12-08T00:50:38,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741838_1014 (size=24376)
2024-12-08T00:50:38,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741838_1014 (size=24376)
2024-12-08T00:50:38,043 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=32 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/0b14ec84746b4311ad26c21ba706d30c
2024-12-08T00:50:38,049 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/0b14ec84746b4311ad26c21ba706d30c as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/0b14ec84746b4311ad26c21ba706d30c
2024-12-08T00:50:38,053 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/0b14ec84746b4311ad26c21ba706d30c, entries=18, sequenceid=32, filesize=23.8 K
2024-12-08T00:50:38,054 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=7.36 KB/7532 for bcea31935b7ba636d16fda5ff1400227 in 20ms, sequenceid=32, compaction requested=false
2024-12-08T00:50:38,054 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bcea31935b7ba636d16fda5ff1400227:
2024-12-08T00:50:38,054 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.0 K, sizeToCheck=16.0 K
2024-12-08T00:50:38,054 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-08T00:50:38,054 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/0b14ec84746b4311ad26c21ba706d30c because midkey is the same as first or last row
2024-12-08T00:50:38,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880
2024-12-08T00:50:38,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta
2024-12-08T00:50:39,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880
2024-12-08T00:50:39,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta
11 more 2024-12-08T00:50:40,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45839 {}] regionserver.HRegion(8855): Flush requested on bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:40,053 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bcea31935b7ba636d16fda5ff1400227 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-12-08T00:50:40,060 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/64c65a2490a046ed81ca429d7bf96fd9 is 1080, key is row0026/info:/1733619038035/Put/seqid=0 2024-12-08T00:50:40,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741839_1015 (size=13586) 2024-12-08T00:50:40,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741839_1015 (size=13586) 2024-12-08T00:50:40,073 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/64c65a2490a046ed81ca429d7bf96fd9 2024-12-08T00:50:40,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/64c65a2490a046ed81ca429d7bf96fd9 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/64c65a2490a046ed81ca429d7bf96fd9 2024-12-08T00:50:40,084 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/64c65a2490a046ed81ca429d7bf96fd9, entries=8, sequenceid=43, filesize=13.3 K 2024-12-08T00:50:40,085 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=13.66 KB/13988 for bcea31935b7ba636d16fda5ff1400227 in 32ms, sequenceid=43, compaction requested=true 2024-12-08T00:50:40,085 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bcea31935b7ba636d16fda5ff1400227: 2024-12-08T00:50:40,085 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=49.3 K, sizeToCheck=16.0 K 2024-12-08T00:50:40,085 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:50:40,085 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/0b14ec84746b4311ad26c21ba706d30c because midkey is the same as first or last row 2024-12-08T00:50:40,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcea31935b7ba636d16fda5ff1400227:info, priority=-2147483648, current under compaction store 
size is 1 2024-12-08T00:50:40,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:50:40,085 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:50:40,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45839 {}] regionserver.HRegion(8855): Flush requested on bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:40,086 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bcea31935b7ba636d16fda5ff1400227 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-08T00:50:40,087 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 50471 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:50:40,087 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1541): bcea31935b7ba636d16fda5ff1400227/info is initiating minor compaction (all files) 2024-12-08T00:50:40,087 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bcea31935b7ba636d16fda5ff1400227/info in TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227. 2024-12-08T00:50:40,087 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/cd4e60d0bce144dc8aa7befd615e4091, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/0b14ec84746b4311ad26c21ba706d30c, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/64c65a2490a046ed81ca429d7bf96fd9] into tmpdir=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp, totalSize=49.3 K 2024-12-08T00:50:40,087 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting cd4e60d0bce144dc8aa7befd615e4091, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733619037979 2024-12-08T00:50:40,088 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0b14ec84746b4311ad26c21ba706d30c, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=32, earliestPutTs=1733619037998 2024-12-08T00:50:40,088 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting 64c65a2490a046ed81ca429d7bf96fd9, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1733619038035 2024-12-08T00:50:40,090 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/2b67dd55b44c478485907aa442b96730 is 1080, key is row0034/info:/1733619040055/Put/seqid=0 
2024-12-08T00:50:40,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741840_1016 (size=20064) 2024-12-08T00:50:40,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741840_1016 (size=20064) 2024-12-08T00:50:40,096 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/2b67dd55b44c478485907aa442b96730 2024-12-08T00:50:40,101 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcea31935b7ba636d16fda5ff1400227#info#compaction#59 average throughput is 16.93 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:50:40,101 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/c56ea584c80341d68aa9d55ce0a18493 is 1080, key is row0001/info:/1733619037979/Put/seqid=0 2024-12-08T00:50:40,102 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/2b67dd55b44c478485907aa442b96730 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/2b67dd55b44c478485907aa442b96730 2024-12-08T00:50:40,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741841_1017 (size=40670) 2024-12-08T00:50:40,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741841_1017 (size=40670) 2024-12-08T00:50:40,108 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/2b67dd55b44c478485907aa442b96730, entries=14, sequenceid=60, filesize=19.6 K 2024-12-08T00:50:40,109 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=10.51 KB/10760 for bcea31935b7ba636d16fda5ff1400227 in 23ms, sequenceid=60, compaction requested=false 2024-12-08T00:50:40,109 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bcea31935b7ba636d16fda5ff1400227: 2024-12-08T00:50:40,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45839 {}] regionserver.HRegion(8855): Flush requested on bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:40,109 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=68.9 K, sizeToCheck=16.0 K 2024-12-08T00:50:40,110 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 
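The compaction writer above is rate limited: "average throughput is 16.93 MB/second, slept 0 time(s) and total slept time is 0 ms ... total limit is 50.00 MB/second" is reported by the PressureAwareThroughputController. The sketch below shows only the general shape of such a limiter (track bytes written, sleep when the writer gets ahead of the allowed rate); the class and its fields are assumptions for illustration, not the controller's implementation.

    // Minimal write-rate limiter sketch. Not HBase code; illustrates the kind of
    // accounting behind the throughput/slept-time figures in the log above.
    final class ThroughputThrottleSketch {
      private final double maxBytesPerSecond;
      private final long start = System.nanoTime();
      private long bytesWritten;

      ThroughputThrottleSketch(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
      }

      /** Call after writing `bytes`; sleeps just long enough to stay under the limit. */
      void control(long bytes) throws InterruptedException {
        bytesWritten += bytes;
        double elapsedSeconds = (System.nanoTime() - start) / 1e9;
        double minimumSeconds = bytesWritten / maxBytesPerSecond;
        if (minimumSeconds > elapsedSeconds) {
          Thread.sleep((long) ((minimumSeconds - elapsedSeconds) * 1000));   // the "slept N time(s)" counter
        }
      }
    }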
2024-12-08T00:50:40,110 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/0b14ec84746b4311ad26c21ba706d30c because midkey is the same as first or last row 2024-12-08T00:50:40,110 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bcea31935b7ba636d16fda5ff1400227 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-08T00:50:40,113 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/c56ea584c80341d68aa9d55ce0a18493 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/c56ea584c80341d68aa9d55ce0a18493 2024-12-08T00:50:40,115 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/869699194d8a4945b381e0a27866ec33 is 1080, key is row0048/info:/1733619040087/Put/seqid=0 2024-12-08T00:50:40,120 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bcea31935b7ba636d16fda5ff1400227/info of bcea31935b7ba636d16fda5ff1400227 into c56ea584c80341d68aa9d55ce0a18493(size=39.7 K), total size for store is 59.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
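Each flush above ends with the same split probe: the policy logs "Should split because region size is big enough" once the summed store file size exceeds sizeToCheck=16.0 K, yet StoreUtils vetoes the split while the midkey of the largest file equals its first or last row. The sketch below is a simplified paraphrase of that decision; the threshold formula follows the documented IncreasingToUpperBoundRegionSplitPolicy behaviour (flush size scaled by the cube of the region count, capped at the max file size), and all names are illustrative rather than copied from HBase's source.

    import java.util.Arrays;

    // Simplified paraphrase of the size-based split check in the log above.
    final class SplitCheckSketch {
      /** Threshold grows with the cube of this table's region count on the server,
       *  capped at the configured max file size (16 K in this test setup). */
      static long sizeToCheck(long maxFileSize, long flushSize, int regionsWithCommonTable) {
        long n = regionsWithCommonTable;
        return Math.min(maxFileSize, 2L * flushSize * n * n * n);
      }

      /** "Should split" once the summed store size exceeds the threshold, but the
       *  split is skipped when the candidate midkey equals the first or last row
       *  of the biggest store file, because there is nothing useful to split on. */
      static boolean shouldSplit(long sumStoreSize, long threshold,
                                 byte[] midKey, byte[] firstKey, byte[] lastKey) {
        if (sumStoreSize <= threshold) {
          return false;
        }
        return !Arrays.equals(midKey, firstKey) && !Arrays.equals(midKey, lastKey);
      }
    }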
2024-12-08T00:50:40,120 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bcea31935b7ba636d16fda5ff1400227: 2024-12-08T00:50:40,120 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227., storeName=bcea31935b7ba636d16fda5ff1400227/info, priority=13, startTime=1733619040085; duration=0sec 2024-12-08T00:50:40,120 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=59.3 K, sizeToCheck=16.0 K 2024-12-08T00:50:40,120 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:50:40,120 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/c56ea584c80341d68aa9d55ce0a18493 because midkey is the same as first or last row 2024-12-08T00:50:40,120 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=59.3 K, sizeToCheck=16.0 K 2024-12-08T00:50:40,120 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:50:40,120 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/c56ea584c80341d68aa9d55ce0a18493 because midkey is the same as first or last row 2024-12-08T00:50:40,120 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=59.3 K, sizeToCheck=16.0 K 2024-12-08T00:50:40,120 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:50:40,120 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/c56ea584c80341d68aa9d55ce0a18493 because midkey is the same as first or last row 2024-12-08T00:50:40,120 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:50:40,121 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcea31935b7ba636d16fda5ff1400227:info 2024-12-08T00:50:40,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741842_1018 (size=16817) 2024-12-08T00:50:40,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741842_1018 (size=16817) 2024-12-08T00:50:40,123 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/869699194d8a4945b381e0a27866ec33 2024-12-08T00:50:40,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/869699194d8a4945b381e0a27866ec33 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/869699194d8a4945b381e0a27866ec33 2024-12-08T00:50:40,133 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/869699194d8a4945b381e0a27866ec33, entries=11, sequenceid=74, filesize=16.4 K 2024-12-08T00:50:40,134 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=6.30 KB/6456 for bcea31935b7ba636d16fda5ff1400227 in 24ms, sequenceid=74, compaction requested=true 2024-12-08T00:50:40,134 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bcea31935b7ba636d16fda5ff1400227: 2024-12-08T00:50:40,134 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=75.7 K, sizeToCheck=16.0 K 2024-12-08T00:50:40,134 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:50:40,134 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/c56ea584c80341d68aa9d55ce0a18493 because midkey is the same as first or last row 2024-12-08T00:50:40,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcea31935b7ba636d16fda5ff1400227:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:50:40,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:50:40,134 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:50:40,135 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 77551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:50:40,135 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1541): bcea31935b7ba636d16fda5ff1400227/info is initiating minor compaction (all files) 2024-12-08T00:50:40,135 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bcea31935b7ba636d16fda5ff1400227/info in TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227. 
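"Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" followed by "Exploring compaction algorithm has selected 3 files of size 77551 ... after considering 1 permutations" describes a size-ratio selection over the store's HFiles. The sketch below illustrates the general idea only (examine contiguous windows of the size-ordered files and keep a window whose members are of similar size); it is not the ExploringCompactionPolicy implementation, and the preference for the cheapest qualifying window is an assumption of this sketch.

    import java.util.List;

    // Illustrative size-ratio selection over store file sizes (bytes).
    final class CompactionSelectionSketch {
      static List<Long> select(List<Long> fileSizes, int minFiles, int maxFiles, double ratio) {
        List<Long> best = List.of();
        long bestTotal = Long.MAX_VALUE;
        for (int start = 0; start < fileSizes.size(); start++) {
          for (int end = start + minFiles; end <= Math.min(fileSizes.size(), start + maxFiles); end++) {
            List<Long> window = fileSizes.subList(start, end);
            if (!withinRatio(window, ratio)) {
              continue;                                  // reject windows with one outsized file
            }
            long total = window.stream().mapToLong(Long::longValue).sum();
            if (best.isEmpty() || total < bestTotal) {   // sketch's tie-break: fewest bytes rewritten
              best = window;
              bestTotal = total;
            }
          }
        }
        return best;
      }

      // Every file must be no larger than `ratio` times the sum of the others in the window.
      private static boolean withinRatio(List<Long> window, double ratio) {
        long total = window.stream().mapToLong(Long::longValue).sum();
        return window.stream().allMatch(size -> size <= (total - size) * ratio);
      }
    }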
2024-12-08T00:50:40,135 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/c56ea584c80341d68aa9d55ce0a18493, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/2b67dd55b44c478485907aa442b96730, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/869699194d8a4945b381e0a27866ec33] into tmpdir=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp, totalSize=75.7 K 2024-12-08T00:50:40,136 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting c56ea584c80341d68aa9d55ce0a18493, keycount=33, bloomtype=ROW, size=39.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1733619037979 2024-12-08T00:50:40,136 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2b67dd55b44c478485907aa442b96730, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1733619040055 2024-12-08T00:50:40,136 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting 869699194d8a4945b381e0a27866ec33, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733619040087 2024-12-08T00:50:40,146 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcea31935b7ba636d16fda5ff1400227#info#compaction#61 average throughput is 29.76 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:50:40,146 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/c3dc2bfc5de6402a82307510a2b58c4e is 1080, key is row0001/info:/1733619037979/Put/seqid=0 2024-12-08T00:50:40,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741843_1019 (size=67766) 2024-12-08T00:50:40,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741843_1019 (size=67766) 2024-12-08T00:50:40,157 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/c3dc2bfc5de6402a82307510a2b58c4e as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/c3dc2bfc5de6402a82307510a2b58c4e 2024-12-08T00:50:40,163 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bcea31935b7ba636d16fda5ff1400227/info of bcea31935b7ba636d16fda5ff1400227 into c3dc2bfc5de6402a82307510a2b58c4e(size=66.2 K), total size for store is 66.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:50:40,164 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bcea31935b7ba636d16fda5ff1400227: 2024-12-08T00:50:40,164 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227., storeName=bcea31935b7ba636d16fda5ff1400227/info, priority=13, startTime=1733619040134; duration=0sec 2024-12-08T00:50:40,164 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.2 K, sizeToCheck=16.0 K 2024-12-08T00:50:40,164 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:50:40,164 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/c3dc2bfc5de6402a82307510a2b58c4e because midkey is the same as first or last row 2024-12-08T00:50:40,164 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.2 K, sizeToCheck=16.0 K 2024-12-08T00:50:40,164 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:50:40,164 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/c3dc2bfc5de6402a82307510a2b58c4e because midkey is the same as first or last row 2024-12-08T00:50:40,164 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.2 K, sizeToCheck=16.0 K 2024-12-08T00:50:40,164 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:50:40,164 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/c3dc2bfc5de6402a82307510a2b58c4e because midkey is the same as first or last row 2024-12-08T00:50:40,164 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:50:40,164 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcea31935b7ba636d16fda5ff1400227:info 2024-12-08T00:50:40,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:40,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:41,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:41,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:42,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45839 {}] regionserver.HRegion(8855): Flush requested on bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:42,129 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bcea31935b7ba636d16fda5ff1400227 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T00:50:42,138 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/5117da3a0aa94fc287b887c8fa950b35 is 1080, key is row0059/info:/1733619040111/Put/seqid=0 2024-12-08T00:50:42,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741844_1020 (size=12509) 2024-12-08T00:50:42,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741844_1020 (size=12509) 2024-12-08T00:50:42,144 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/5117da3a0aa94fc287b887c8fa950b35 2024-12-08T00:50:42,152 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/5117da3a0aa94fc287b887c8fa950b35 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/5117da3a0aa94fc287b887c8fa950b35 2024-12-08T00:50:42,157 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/5117da3a0aa94fc287b887c8fa950b35, entries=7, sequenceid=86, filesize=12.2 K 2024-12-08T00:50:42,158 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for bcea31935b7ba636d16fda5ff1400227 in 29ms, sequenceid=86, compaction requested=false 2024-12-08T00:50:42,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bcea31935b7ba636d16fda5ff1400227: 2024-12-08T00:50:42,158 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=78.4 K, sizeToCheck=16.0 K 2024-12-08T00:50:42,158 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:50:42,159 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/c3dc2bfc5de6402a82307510a2b58c4e because midkey is the same as first or last row 2024-12-08T00:50:42,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45839 {}] regionserver.HRegion(8855): Flush requested on bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:42,160 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bcea31935b7ba636d16fda5ff1400227 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-08T00:50:42,163 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/bf89f00da8044df18bf1da9a91cf9672 is 1080, key is row0066/info:/1733619042132/Put/seqid=0 2024-12-08T00:50:42,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741845_1021 (size=16817) 2024-12-08T00:50:42,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741845_1021 (size=16817) 2024-12-08T00:50:42,168 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=100 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/bf89f00da8044df18bf1da9a91cf9672 2024-12-08T00:50:42,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/bf89f00da8044df18bf1da9a91cf9672 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/bf89f00da8044df18bf1da9a91cf9672 2024-12-08T00:50:42,180 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/bf89f00da8044df18bf1da9a91cf9672, entries=11, sequenceid=100, filesize=16.4 K 2024-12-08T00:50:42,181 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=10.51 KB/10760 for bcea31935b7ba636d16fda5ff1400227 in 21ms, sequenceid=100, compaction requested=true 2024-12-08T00:50:42,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bcea31935b7ba636d16fda5ff1400227: 2024-12-08T00:50:42,181 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=94.8 K, sizeToCheck=16.0 K 2024-12-08T00:50:42,181 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:50:42,181 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/c3dc2bfc5de6402a82307510a2b58c4e because midkey is the same as first or last row 2024-12-08T00:50:42,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcea31935b7ba636d16fda5ff1400227:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:50:42,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:50:42,181 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:50:42,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45839 {}] regionserver.HRegion(8855): Flush requested on bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:42,182 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bcea31935b7ba636d16fda5ff1400227 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-08T00:50:42,182 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 97092 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:50:42,182 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1541): bcea31935b7ba636d16fda5ff1400227/info is initiating minor compaction (all files) 2024-12-08T00:50:42,183 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bcea31935b7ba636d16fda5ff1400227/info in TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227. 
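The repeated Close-WAL-Writer warnings above come from RecoverLeaseFSUtils probing isFileClosed reflectively against a DFS client that has already been shut down, so each attempt unwraps to "java.io.IOException: Filesystem closed" and is retried roughly once per second. Expressed directly against the public HDFS API instead of reflection, the underlying loop looks about like the sketch below (assuming a still-open DistributedFileSystem; timeout handling is simplified).

    import java.io.IOException;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    // Sketch of a WAL lease-recovery wait loop. In the log the client is closed,
    // so both calls below would keep throwing "Filesystem closed".
    final class WalLeaseRecoverySketch {
      static boolean recoverLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
          throws IOException, InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        if (dfs.recoverLease(wal)) {        // true when the lease is recovered immediately
          return true;
        }
        while (System.currentTimeMillis() < deadline) {
          if (dfs.isFileClosed(wal)) {      // the probe that fails in the warnings above
            return true;
          }
          Thread.sleep(1000L);              // the log shows roughly one retry per second
        }
        return false;
      }
    }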
2024-12-08T00:50:42,183 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/c3dc2bfc5de6402a82307510a2b58c4e, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/5117da3a0aa94fc287b887c8fa950b35, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/bf89f00da8044df18bf1da9a91cf9672] into tmpdir=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp, totalSize=94.8 K 2024-12-08T00:50:42,183 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting c3dc2bfc5de6402a82307510a2b58c4e, keycount=58, bloomtype=ROW, size=66.2 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733619037979 2024-12-08T00:50:42,183 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5117da3a0aa94fc287b887c8fa950b35, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1733619040111 2024-12-08T00:50:42,184 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting bf89f00da8044df18bf1da9a91cf9672, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1733619042132 2024-12-08T00:50:42,186 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/e890c85e66a84cc58840e923dc7ebb92 is 1080, key is row0077/info:/1733619042161/Put/seqid=0 2024-12-08T00:50:42,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741846_1022 (size=16817) 2024-12-08T00:50:42,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741846_1022 (size=16817) 2024-12-08T00:50:42,192 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/e890c85e66a84cc58840e923dc7ebb92 2024-12-08T00:50:42,199 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/e890c85e66a84cc58840e923dc7ebb92 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/e890c85e66a84cc58840e923dc7ebb92 2024-12-08T00:50:42,199 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcea31935b7ba636d16fda5ff1400227#info#compaction#65 average throughput is 19.50 MB/second, slept 0 time(s) and total slept time 
is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:50:42,200 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/81017759f2744aff91e22bab9e3a13e2 is 1080, key is row0001/info:/1733619037979/Put/seqid=0 2024-12-08T00:50:42,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741847_1023 (size=87327) 2024-12-08T00:50:42,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741847_1023 (size=87327) 2024-12-08T00:50:42,205 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/e890c85e66a84cc58840e923dc7ebb92, entries=11, sequenceid=114, filesize=16.4 K 2024-12-08T00:50:42,206 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for bcea31935b7ba636d16fda5ff1400227 in 24ms, sequenceid=114, compaction requested=false 2024-12-08T00:50:42,206 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bcea31935b7ba636d16fda5ff1400227: 2024-12-08T00:50:42,206 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=111.2 K, sizeToCheck=16.0 K 2024-12-08T00:50:42,206 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:50:42,206 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/c3dc2bfc5de6402a82307510a2b58c4e because midkey is the same as first or last row 2024-12-08T00:50:42,209 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/81017759f2744aff91e22bab9e3a13e2 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/81017759f2744aff91e22bab9e3a13e2 2024-12-08T00:50:42,215 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bcea31935b7ba636d16fda5ff1400227/info of bcea31935b7ba636d16fda5ff1400227 into 81017759f2744aff91e22bab9e3a13e2(size=85.3 K), total size for store is 101.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
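Flushes, compactions and the split that follows at splitKey=row0062 are all triggered automatically here by memstore pressure and the CompactSplit thread; the same operations can also be requested explicitly through the Admin API, which is a convenient way to reproduce this sequence by hand. A minimal, illustrative sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AdminOpsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.flush(table);                            // force a memstore flush
          admin.compact(table);                          // request a (minor) compaction
          admin.split(table, Bytes.toBytes("row0062"));  // split at an explicit key, as in the request below
        }
      }
    }

The compaction and split calls are requests that the servers act on asynchronously; their effect shows up as region-server and master log entries like the ones in this section.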
2024-12-08T00:50:42,215 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bcea31935b7ba636d16fda5ff1400227: 2024-12-08T00:50:42,215 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227., storeName=bcea31935b7ba636d16fda5ff1400227/info, priority=13, startTime=1733619042181; duration=0sec 2024-12-08T00:50:42,215 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=101.7 K, sizeToCheck=16.0 K 2024-12-08T00:50:42,215 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:50:42,215 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=101.7 K, sizeToCheck=16.0 K 2024-12-08T00:50:42,215 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:50:42,215 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=101.7 K, sizeToCheck=16.0 K 2024-12-08T00:50:42,215 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T00:50:42,216 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:50:42,216 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:50:42,216 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcea31935b7ba636d16fda5ff1400227:info 2024-12-08T00:50:42,217 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40597 {}] assignment.AssignmentManager(1363): Split request from 0f983e3e5be1,45839,1733619026732, parent={ENCODED => bcea31935b7ba636d16fda5ff1400227, NAME => 'TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-08T00:50:42,222 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40597 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=0f983e3e5be1,45839,1733619026732 2024-12-08T00:50:42,225 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40597 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bcea31935b7ba636d16fda5ff1400227, daughterA=b7786dedf2ce8631623c86f93f6e1d74, daughterB=c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:50:42,226 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bcea31935b7ba636d16fda5ff1400227, 
daughterA=b7786dedf2ce8631623c86f93f6e1d74, daughterB=c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:50:42,226 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bcea31935b7ba636d16fda5ff1400227, daughterA=b7786dedf2ce8631623c86f93f6e1d74, daughterB=c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:50:42,226 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bcea31935b7ba636d16fda5ff1400227, daughterA=b7786dedf2ce8631623c86f93f6e1d74, daughterB=c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:50:42,233 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bcea31935b7ba636d16fda5ff1400227, UNASSIGN}] 2024-12-08T00:50:42,234 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bcea31935b7ba636d16fda5ff1400227, UNASSIGN 2024-12-08T00:50:42,236 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=bcea31935b7ba636d16fda5ff1400227, regionState=CLOSING, regionLocation=0f983e3e5be1,45839,1733619026732 2024-12-08T00:50:42,238 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bcea31935b7ba636d16fda5ff1400227, UNASSIGN because future has completed 2024-12-08T00:50:42,238 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-08T00:50:42,238 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure bcea31935b7ba636d16fda5ff1400227, server=0f983e3e5be1,45839,1733619026732}] 2024-12-08T00:50:42,395 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:42,395 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-08T00:50:42,395 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing bcea31935b7ba636d16fda5ff1400227, disabling compactions & flushes 2024-12-08T00:50:42,395 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227. 2024-12-08T00:50:42,395 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227. 
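The SplitTableRegionProcedure entries above record parent bcea31935b7ba636d16fda5ff1400227 being split at row0062 into daughters b7786dedf2ce8631623c86f93f6e1d74 and c94c6b105330c7ab03f7d694b18f1b38. Once the procedure completes, the new boundaries are visible from any client through RegionLocator; the sketch below just lists them (illustrative code, expecting one daughter ending at row0062 and one starting there).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionBoundariesSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(
                 TableName.valueOf("TestLogRolling-testLogRolling"))) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            // Print encoded region name, [startKey, endKey) and the hosting server.
            System.out.printf("%s [%s, %s) on %s%n",
                loc.getRegion().getEncodedName(),
                Bytes.toStringBinary(loc.getRegion().getStartKey()),
                Bytes.toStringBinary(loc.getRegion().getEndKey()),
                loc.getServerName());
          }
        }
      }
    }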
2024-12-08T00:50:42,395 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227. after waiting 0 ms 2024-12-08T00:50:42,395 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227. 2024-12-08T00:50:42,396 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing bcea31935b7ba636d16fda5ff1400227 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-12-08T00:50:42,400 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/8e5aae031e91498bac239b58bdd7ade5 is 1080, key is row0088/info:/1733619042183/Put/seqid=0 2024-12-08T00:50:42,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741848_1024 (size=14663) 2024-12-08T00:50:42,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741848_1024 (size=14663) 2024-12-08T00:50:42,405 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/8e5aae031e91498bac239b58bdd7ade5 2024-12-08T00:50:42,411 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/.tmp/info/8e5aae031e91498bac239b58bdd7ade5 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/8e5aae031e91498bac239b58bdd7ade5 2024-12-08T00:50:42,416 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/8e5aae031e91498bac239b58bdd7ade5, entries=9, sequenceid=127, filesize=14.3 K 2024-12-08T00:50:42,417 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=0 B/0 for bcea31935b7ba636d16fda5ff1400227 in 22ms, sequenceid=127, compaction requested=true 2024-12-08T00:50:42,418 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227.-1 {}] regionserver.HStore(2317): Moving the files 
[hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/cd4e60d0bce144dc8aa7befd615e4091, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/0b14ec84746b4311ad26c21ba706d30c, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/c56ea584c80341d68aa9d55ce0a18493, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/64c65a2490a046ed81ca429d7bf96fd9, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/2b67dd55b44c478485907aa442b96730, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/c3dc2bfc5de6402a82307510a2b58c4e, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/869699194d8a4945b381e0a27866ec33, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/5117da3a0aa94fc287b887c8fa950b35, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/bf89f00da8044df18bf1da9a91cf9672] to archive 2024-12-08T00:50:42,419 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
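When the parent region closes, the store files it no longer needs are not deleted in place; HFileArchiver moves each one from the region's data directory into the parallel archive tree, as the entries that follow show. The mapping is a plain root swap that keeps the namespace/table/region/family/file suffix intact; the sketch below expresses it with Hadoop's Path (the method is illustrative, not HBase's archiver).

    import org.apache.hadoop.fs.Path;

    // data/<ns>/<table>/<region>/<family>/<hfile>  ->  archive/data/<ns>/<table>/<region>/<family>/<hfile>
    final class ArchivePathSketch {
      static Path toArchive(Path rootDir, Path storeFile) {
        String file   = storeFile.getName();
        String family = storeFile.getParent().getName();
        String region = storeFile.getParent().getParent().getName();
        String table  = storeFile.getParent().getParent().getParent().getName();
        String ns     = storeFile.getParent().getParent().getParent().getParent().getName();
        return new Path(rootDir,
            String.format("archive/data/%s/%s/%s/%s/%s", ns, table, region, family, file));
      }
    }

For example, .../data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/cd4e60d0bce144dc8aa7befd615e4091 maps to the corresponding .../archive/data/default/... path recorded just below.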
2024-12-08T00:50:42,421 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/cd4e60d0bce144dc8aa7befd615e4091 to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/cd4e60d0bce144dc8aa7befd615e4091 2024-12-08T00:50:42,422 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/0b14ec84746b4311ad26c21ba706d30c to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/0b14ec84746b4311ad26c21ba706d30c 2024-12-08T00:50:42,423 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/c56ea584c80341d68aa9d55ce0a18493 to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/c56ea584c80341d68aa9d55ce0a18493 2024-12-08T00:50:42,424 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/64c65a2490a046ed81ca429d7bf96fd9 to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/64c65a2490a046ed81ca429d7bf96fd9 2024-12-08T00:50:42,425 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/2b67dd55b44c478485907aa442b96730 to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/2b67dd55b44c478485907aa442b96730 2024-12-08T00:50:42,425 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/c3dc2bfc5de6402a82307510a2b58c4e to 
hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/c3dc2bfc5de6402a82307510a2b58c4e 2024-12-08T00:50:42,426 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/869699194d8a4945b381e0a27866ec33 to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/869699194d8a4945b381e0a27866ec33 2024-12-08T00:50:42,428 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/5117da3a0aa94fc287b887c8fa950b35 to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/5117da3a0aa94fc287b887c8fa950b35 2024-12-08T00:50:42,429 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/bf89f00da8044df18bf1da9a91cf9672 to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/bf89f00da8044df18bf1da9a91cf9672 2024-12-08T00:50:42,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:42,435 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=1 2024-12-08T00:50:42,436 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227. 2024-12-08T00:50:42,436 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for bcea31935b7ba636d16fda5ff1400227: Waiting for close lock at 1733619042395Running coprocessor pre-close hooks at 1733619042395Disabling compacts and flushes for region at 1733619042395Disabling writes for close at 1733619042395Obtaining lock to block concurrent updates at 1733619042396 (+1 ms)Preparing flush snapshotting stores in bcea31935b7ba636d16fda5ff1400227 at 1733619042396Finished memstore snapshotting TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227., syncing WAL and waiting on mvcc, flushsize=dataSize=9684, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1733619042396Flushing stores of TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227. 
at 1733619042396Flushing bcea31935b7ba636d16fda5ff1400227/info: creating writer at 1733619042397 (+1 ms)Flushing bcea31935b7ba636d16fda5ff1400227/info: appending metadata at 1733619042399 (+2 ms)Flushing bcea31935b7ba636d16fda5ff1400227/info: closing flushed file at 1733619042399Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4fbdcef0: reopening flushed file at 1733619042410 (+11 ms)Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=0 B/0 for bcea31935b7ba636d16fda5ff1400227 in 22ms, sequenceid=127, compaction requested=true at 1733619042417 (+7 ms)Writing region close event to WAL at 1733619042431 (+14 ms)Running coprocessor post-close hooks at 1733619042436 (+5 ms)Closed at 1733619042436 2024-12-08T00:50:42,438 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:42,439 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=bcea31935b7ba636d16fda5ff1400227, regionState=CLOSED 2024-12-08T00:50:42,441 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure bcea31935b7ba636d16fda5ff1400227, server=0f983e3e5be1,45839,1733619026732 because future has completed 2024-12-08T00:50:42,444 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-12-08T00:50:42,444 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure bcea31935b7ba636d16fda5ff1400227, server=0f983e3e5be1,45839,1733619026732 in 204 msec 2024-12-08T00:50:42,446 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-08T00:50:42,446 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bcea31935b7ba636d16fda5ff1400227, UNASSIGN in 212 msec 2024-12-08T00:50:42,454 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:50:42,457 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=bcea31935b7ba636d16fda5ff1400227, threads=3 2024-12-08T00:50:42,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:42,459 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/81017759f2744aff91e22bab9e3a13e2 for region: bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:42,459 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/8e5aae031e91498bac239b58bdd7ade5 for region: bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:42,459 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/e890c85e66a84cc58840e923dc7ebb92 for region: bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:42,471 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/8e5aae031e91498bac239b58bdd7ade5, top=true 2024-12-08T00:50:42,471 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/e890c85e66a84cc58840e923dc7ebb92, top=true 
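The StoreFileSplitter entries above show the daughter region receiving an HFileLink rather than a copy of the parent's HFile; the link file created in the daughter's column-family directory encodes the table name, the parent's encoded region name, and the original HFile name. A small sketch of just that naming pattern (the class below is illustrative, not HBase's HFileLink, which also resolves the link back to the data/ and archive/ locations):

```java
// Reconstructs the "<table>=<parentEncodedRegion>-<hfileName>" link-file name
// visible in the log lines above.
public final class HFileLinkNameSketch {
  static String linkFileName(String tableName, String parentEncodedRegion, String hfileName) {
    return tableName + "=" + parentEncodedRegion + "-" + hfileName;
  }

  public static void main(String[] args) {
    // Matches the file created for daughter c94c6b105330c7ab03f7d694b18f1b38 in the log.
    System.out.println(linkFileName(
        "TestLogRolling-testLogRolling",
        "bcea31935b7ba636d16fda5ff1400227",
        "8e5aae031e91498bac239b58bdd7ade5"));
  }
}
```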
2024-12-08T00:50:42,476 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/TestLogRolling-testLogRolling=bcea31935b7ba636d16fda5ff1400227-8e5aae031e91498bac239b58bdd7ade5 for child: c94c6b105330c7ab03f7d694b18f1b38, parent: bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:42,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741849_1025 (size=27) 2024-12-08T00:50:42,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741849_1025 (size=27) 2024-12-08T00:50:42,476 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/8e5aae031e91498bac239b58bdd7ade5 for region: bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:42,477 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/TestLogRolling-testLogRolling=bcea31935b7ba636d16fda5ff1400227-e890c85e66a84cc58840e923dc7ebb92 for child: c94c6b105330c7ab03f7d694b18f1b38, parent: bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:42,477 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/e890c85e66a84cc58840e923dc7ebb92 for region: bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:42,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741850_1026 (size=27) 2024-12-08T00:50:42,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741850_1026 (size=27) 2024-12-08T00:50:42,485 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/81017759f2744aff91e22bab9e3a13e2 for region: bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:50:42,487 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region bcea31935b7ba636d16fda5ff1400227 Daughter A: [hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/b7786dedf2ce8631623c86f93f6e1d74/info/81017759f2744aff91e22bab9e3a13e2.bcea31935b7ba636d16fda5ff1400227] storefiles, Daughter B: [hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/81017759f2744aff91e22bab9e3a13e2.bcea31935b7ba636d16fda5ff1400227, 
hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/TestLogRolling-testLogRolling=bcea31935b7ba636d16fda5ff1400227-8e5aae031e91498bac239b58bdd7ade5, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/TestLogRolling-testLogRolling=bcea31935b7ba636d16fda5ff1400227-e890c85e66a84cc58840e923dc7ebb92] storefiles. 2024-12-08T00:50:42,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741851_1027 (size=71) 2024-12-08T00:50:42,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741851_1027 (size=71) 2024-12-08T00:50:42,495 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:50:42,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741852_1028 (size=71) 2024-12-08T00:50:42,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741852_1028 (size=71) 2024-12-08T00:50:42,506 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:50:42,516 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/b7786dedf2ce8631623c86f93f6e1d74/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-12-08T00:50:42,518 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-12-08T00:50:42,521 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733619042520"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1733619042520"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1733619042520"}]},"ts":"1733619042520"} 2024-12-08T00:50:42,521 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733619042222.b7786dedf2ce8631623c86f93f6e1d74.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733619042520"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733619042520"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733619042520"}]},"ts":"1733619042520"} 2024-12-08T00:50:42,521 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733619042520"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733619042520"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733619042520"}]},"ts":"1733619042520"} 2024-12-08T00:50:42,539 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b7786dedf2ce8631623c86f93f6e1d74, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c94c6b105330c7ab03f7d694b18f1b38, ASSIGN}] 2024-12-08T00:50:42,540 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b7786dedf2ce8631623c86f93f6e1d74, ASSIGN 2024-12-08T00:50:42,540 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c94c6b105330c7ab03f7d694b18f1b38, ASSIGN 2024-12-08T00:50:42,541 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b7786dedf2ce8631623c86f93f6e1d74, ASSIGN; state=SPLITTING_NEW, location=0f983e3e5be1,45839,1733619026732; forceNewPlan=false, retain=false 2024-12-08T00:50:42,541 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c94c6b105330c7ab03f7d694b18f1b38, ASSIGN; state=SPLITTING_NEW, location=0f983e3e5be1,45839,1733619026732; forceNewPlan=false, retain=false 2024-12-08T00:50:42,692 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=c94c6b105330c7ab03f7d694b18f1b38, regionState=OPENING, regionLocation=0f983e3e5be1,45839,1733619026732 2024-12-08T00:50:42,692 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=b7786dedf2ce8631623c86f93f6e1d74, regionState=OPENING, regionLocation=0f983e3e5be1,45839,1733619026732 2024-12-08T00:50:42,695 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b7786dedf2ce8631623c86f93f6e1d74, ASSIGN because future has completed 2024-12-08T00:50:42,695 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure b7786dedf2ce8631623c86f93f6e1d74, server=0f983e3e5be1,45839,1733619026732}] 2024-12-08T00:50:42,696 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, 
state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c94c6b105330c7ab03f7d694b18f1b38, ASSIGN because future has completed 2024-12-08T00:50:42,697 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure c94c6b105330c7ab03f7d694b18f1b38, server=0f983e3e5be1,45839,1733619026732}] 2024-12-08T00:50:42,856 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38. 2024-12-08T00:50:42,856 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => c94c6b105330c7ab03f7d694b18f1b38, NAME => 'TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.', STARTKEY => 'row0062', ENDKEY => ''} 2024-12-08T00:50:42,857 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:50:42,857 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:50:42,857 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:50:42,858 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:50:42,860 INFO [StoreOpener-c94c6b105330c7ab03f7d694b18f1b38-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:50:42,862 INFO [StoreOpener-c94c6b105330c7ab03f7d694b18f1b38-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c94c6b105330c7ab03f7d694b18f1b38 columnFamilyName info 2024-12-08T00:50:42,862 DEBUG [StoreOpener-c94c6b105330c7ab03f7d694b18f1b38-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:50:42,876 DEBUG [StoreOpener-c94c6b105330c7ab03f7d694b18f1b38-1 {}] 
regionserver.StoreEngine(278): loaded hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/81017759f2744aff91e22bab9e3a13e2.bcea31935b7ba636d16fda5ff1400227->hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/81017759f2744aff91e22bab9e3a13e2-top 2024-12-08T00:50:42,881 DEBUG [StoreOpener-c94c6b105330c7ab03f7d694b18f1b38-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/TestLogRolling-testLogRolling=bcea31935b7ba636d16fda5ff1400227-8e5aae031e91498bac239b58bdd7ade5 2024-12-08T00:50:42,885 DEBUG [StoreOpener-c94c6b105330c7ab03f7d694b18f1b38-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/TestLogRolling-testLogRolling=bcea31935b7ba636d16fda5ff1400227-e890c85e66a84cc58840e923dc7ebb92 2024-12-08T00:50:42,885 INFO [StoreOpener-c94c6b105330c7ab03f7d694b18f1b38-1 {}] regionserver.HStore(327): Store=c94c6b105330c7ab03f7d694b18f1b38/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:50:42,886 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:50:42,886 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:50:42,888 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:50:42,888 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:50:42,888 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:50:42,891 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:50:42,892 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened c94c6b105330c7ab03f7d694b18f1b38; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=855281, jitterRate=0.08754628896713257}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T00:50:42,892 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] 
regionserver.HRegion(1122): Running coprocessor post-open hooks for c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:50:42,893 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for c94c6b105330c7ab03f7d694b18f1b38: Running coprocessor pre-open hook at 1733619042858Writing region info on filesystem at 1733619042858Initializing all the Stores at 1733619042860 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733619042860Cleaning up temporary data from old regions at 1733619042888 (+28 ms)Running coprocessor post-open hooks at 1733619042892 (+4 ms)Region opened successfully at 1733619042893 (+1 ms) 2024-12-08T00:50:42,894 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38., pid=13, masterSystemTime=1733619042848 2024-12-08T00:50:42,895 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store c94c6b105330c7ab03f7d694b18f1b38:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:50:42,895 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:50:42,895 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:50:42,896 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38. 2024-12-08T00:50:42,896 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1541): c94c6b105330c7ab03f7d694b18f1b38/info is initiating minor compaction (all files) 2024-12-08T00:50:42,896 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c94c6b105330c7ab03f7d694b18f1b38/info in TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38. 
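The CompactionConfiguration and SortedCompactionPolicy lines above describe a ratio-based selection (ratio 1.200000, minFilesToCompact:3, maxFilesToCompact:10). A hedged sketch of that size-ratio test follows; it is an illustration only, not the actual ExploringCompactionPolicy code, and the comment about why this particular set compacts anyway is a presumption rather than something the log states.

```java
// A candidate set passes the ratio test when no file is larger than
// ratio * (sum of the other files), which keeps one huge file from being
// rewritten alongside a few tiny ones.
import java.util.List;

final class RatioCheckSketch {
  static boolean withinRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // The three daughter-region files from the log: ~85.3 K, ~16.4 K, ~14.3 K.
    // 85.3 K exceeds 1.2 * (16.4 K + 14.3 K), so a plain ratio check rejects the set;
    // the log still shows an "(all files)" minor compaction, presumably because the
    // store holds reference files from the split that must be rewritten.
    System.out.println(withinRatio(List.of(85_300L, 16_400L, 14_300L), 1.2)); // false
  }
}
```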
2024-12-08T00:50:42,896 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/81017759f2744aff91e22bab9e3a13e2.bcea31935b7ba636d16fda5ff1400227->hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/81017759f2744aff91e22bab9e3a13e2-top, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/TestLogRolling-testLogRolling=bcea31935b7ba636d16fda5ff1400227-e890c85e66a84cc58840e923dc7ebb92, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/TestLogRolling-testLogRolling=bcea31935b7ba636d16fda5ff1400227-8e5aae031e91498bac239b58bdd7ade5] into tmpdir=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp, totalSize=116.0 K 2024-12-08T00:50:42,897 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38. 2024-12-08T00:50:42,897 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38. 2024-12-08T00:50:42,897 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting 81017759f2744aff91e22bab9e3a13e2.bcea31935b7ba636d16fda5ff1400227, keycount=38, bloomtype=ROW, size=85.3 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1733619037979 2024-12-08T00:50:42,897 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733619042222.b7786dedf2ce8631623c86f93f6e1d74. 
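The compaction input above includes 81017759f2744aff91e22bab9e3a13e2.bcea31935b7ba636d16fda5ff1400227->...-top: a reference to the parent's HFile rather than a copy. The daughter with STARTKEY 'row0062' reads only the upper half of the parent file, while the other daughter uses the "-bottom" half below the split key. The sketch below illustrates that idea only (it is not HBase's HalfStoreFileReader; names and types are made up):

```java
import java.util.List;
import java.util.stream.Collectors;

final class HalfReaderSketch {
  static List<String> topHalf(List<String> sortedRowKeys, String splitKey) {
    return sortedRowKeys.stream()
        .filter(k -> k.compareTo(splitKey) >= 0)   // top half: splitKey and above
        .collect(Collectors.toList());
  }

  static List<String> bottomHalf(List<String> sortedRowKeys, String splitKey) {
    return sortedRowKeys.stream()
        .filter(k -> k.compareTo(splitKey) < 0)    // bottom half: strictly below splitKey
        .collect(Collectors.toList());
  }

  public static void main(String[] args) {
    List<String> rows = List.of("row0001", "row0050", "row0062", "row0090");
    System.out.println(topHalf(rows, "row0062"));    // [row0062, row0090]
    System.out.println(bottomHalf(rows, "row0062")); // [row0001, row0050]
  }
}
```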
2024-12-08T00:50:42,897 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => b7786dedf2ce8631623c86f93f6e1d74, NAME => 'TestLogRolling-testLogRolling,,1733619042222.b7786dedf2ce8631623c86f93f6e1d74.', STARTKEY => '', ENDKEY => 'row0062'} 2024-12-08T00:50:42,897 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=bcea31935b7ba636d16fda5ff1400227-e890c85e66a84cc58840e923dc7ebb92, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1733619042161 2024-12-08T00:50:42,898 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling b7786dedf2ce8631623c86f93f6e1d74 2024-12-08T00:50:42,898 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733619042222.b7786dedf2ce8631623c86f93f6e1d74.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:50:42,898 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for b7786dedf2ce8631623c86f93f6e1d74 2024-12-08T00:50:42,898 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for b7786dedf2ce8631623c86f93f6e1d74 2024-12-08T00:50:42,898 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=bcea31935b7ba636d16fda5ff1400227-8e5aae031e91498bac239b58bdd7ade5, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1733619042183 2024-12-08T00:50:42,898 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=c94c6b105330c7ab03f7d694b18f1b38, regionState=OPEN, openSeqNum=131, regionLocation=0f983e3e5be1,45839,1733619026732 2024-12-08T00:50:42,899 INFO [StoreOpener-b7786dedf2ce8631623c86f93f6e1d74-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b7786dedf2ce8631623c86f93f6e1d74 2024-12-08T00:50:42,900 INFO [StoreOpener-b7786dedf2ce8631623c86f93f6e1d74-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b7786dedf2ce8631623c86f93f6e1d74 columnFamilyName info 2024-12-08T00:50:42,901 DEBUG [StoreOpener-b7786dedf2ce8631623c86f93f6e1d74-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:50:42,901 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45839 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-12-08T00:50:42,901 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 2024-12-08T00:50:42,901 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-12-08T00:50:42,901 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure c94c6b105330c7ab03f7d694b18f1b38, server=0f983e3e5be1,45839,1733619026732 because future has completed 2024-12-08T00:50:42,905 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-12-08T00:50:42,906 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure c94c6b105330c7ab03f7d694b18f1b38, server=0f983e3e5be1,45839,1733619026732 in 205 msec 2024-12-08T00:50:42,908 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c94c6b105330c7ab03f7d694b18f1b38, ASSIGN in 367 msec 2024-12-08T00:50:42,914 DEBUG [StoreOpener-b7786dedf2ce8631623c86f93f6e1d74-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/b7786dedf2ce8631623c86f93f6e1d74/info/81017759f2744aff91e22bab9e3a13e2.bcea31935b7ba636d16fda5ff1400227->hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/81017759f2744aff91e22bab9e3a13e2-bottom 2024-12-08T00:50:42,915 INFO [StoreOpener-b7786dedf2ce8631623c86f93f6e1d74-1 {}] regionserver.HStore(327): Store=b7786dedf2ce8631623c86f93f6e1d74/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:50:42,915 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for b7786dedf2ce8631623c86f93f6e1d74 2024-12-08T00:50:42,916 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/b7786dedf2ce8631623c86f93f6e1d74 2024-12-08T00:50:42,916 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/1588230740/.tmp/info/2cbc81ef9f0746c19db021681ee5b11b is 193, key is TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38./info:regioninfo/1733619042898/Put/seqid=0 2024-12-08T00:50:42,917 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/b7786dedf2ce8631623c86f93f6e1d74 2024-12-08T00:50:42,917 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for b7786dedf2ce8631623c86f93f6e1d74 2024-12-08T00:50:42,917 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for b7786dedf2ce8631623c86f93f6e1d74 2024-12-08T00:50:42,919 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for b7786dedf2ce8631623c86f93f6e1d74 2024-12-08T00:50:42,920 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened b7786dedf2ce8631623c86f93f6e1d74; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=880365, jitterRate=0.11944256722927094}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T00:50:42,920 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b7786dedf2ce8631623c86f93f6e1d74 2024-12-08T00:50:42,920 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for b7786dedf2ce8631623c86f93f6e1d74: Running coprocessor pre-open hook at 1733619042898Writing region info on filesystem at 1733619042898Initializing all the Stores at 1733619042899 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733619042899Cleaning up temporary data from old regions at 1733619042917 (+18 ms)Running coprocessor post-open hooks at 1733619042920 (+3 ms)Region opened successfully at 1733619042920 2024-12-08T00:50:42,921 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733619042222.b7786dedf2ce8631623c86f93f6e1d74., pid=12, masterSystemTime=1733619042848 2024-12-08T00:50:42,921 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store b7786dedf2ce8631623c86f93f6e1d74:info, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:50:42,921 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:50:42,921 DEBUG [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-08T00:50:42,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741853_1029 (size=9882) 2024-12-08T00:50:42,922 INFO 
[RS:0;0f983e3e5be1:45839-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1733619042222.b7786dedf2ce8631623c86f93f6e1d74. 2024-12-08T00:50:42,922 DEBUG [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] regionserver.HStore(1541): b7786dedf2ce8631623c86f93f6e1d74/info is initiating minor compaction (all files) 2024-12-08T00:50:42,922 INFO [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b7786dedf2ce8631623c86f93f6e1d74/info in TestLogRolling-testLogRolling,,1733619042222.b7786dedf2ce8631623c86f93f6e1d74. 2024-12-08T00:50:42,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741853_1029 (size=9882) 2024-12-08T00:50:42,922 INFO [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/b7786dedf2ce8631623c86f93f6e1d74/info/81017759f2744aff91e22bab9e3a13e2.bcea31935b7ba636d16fda5ff1400227->hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/81017759f2744aff91e22bab9e3a13e2-bottom] into tmpdir=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/b7786dedf2ce8631623c86f93f6e1d74/.tmp, totalSize=85.3 K 2024-12-08T00:50:42,923 DEBUG [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] compactions.Compactor(225): Compacting 81017759f2744aff91e22bab9e3a13e2.bcea31935b7ba636d16fda5ff1400227, keycount=38, bloomtype=ROW, size=85.3 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1733619037979 2024-12-08T00:50:42,923 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/1588230740/.tmp/info/2cbc81ef9f0746c19db021681ee5b11b 2024-12-08T00:50:42,924 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733619042222.b7786dedf2ce8631623c86f93f6e1d74. 2024-12-08T00:50:42,924 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733619042222.b7786dedf2ce8631623c86f93f6e1d74. 2024-12-08T00:50:42,924 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c94c6b105330c7ab03f7d694b18f1b38#info#compaction#68 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:50:42,925 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=b7786dedf2ce8631623c86f93f6e1d74, regionState=OPEN, openSeqNum=131, regionLocation=0f983e3e5be1,45839,1733619026732 2024-12-08T00:50:42,925 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/aceb76fb70444855882bf0c327b5d6cf is 1080, key is row0062/info:/1733619040118/Put/seqid=0 2024-12-08T00:50:42,927 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure b7786dedf2ce8631623c86f93f6e1d74, server=0f983e3e5be1,45839,1733619026732 because future has completed 2024-12-08T00:50:42,930 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-12-08T00:50:42,930 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure b7786dedf2ce8631623c86f93f6e1d74, server=0f983e3e5be1,45839,1733619026732 in 234 msec 2024-12-08T00:50:42,932 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-12-08T00:50:42,932 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b7786dedf2ce8631623c86f93f6e1d74, ASSIGN in 391 msec 2024-12-08T00:50:42,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741854_1030 (size=42984) 2024-12-08T00:50:42,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741854_1030 (size=42984) 2024-12-08T00:50:42,941 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bcea31935b7ba636d16fda5ff1400227, daughterA=b7786dedf2ce8631623c86f93f6e1d74, daughterB=c94c6b105330c7ab03f7d694b18f1b38 in 710 msec 2024-12-08T00:50:42,947 INFO [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7786dedf2ce8631623c86f93f6e1d74#info#compaction#69 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:50:42,948 DEBUG [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/b7786dedf2ce8631623c86f93f6e1d74/.tmp/info/3ece07c6d79c4849a9e26e83203419d9 is 1080, key is row0001/info:/1733619037979/Put/seqid=0 2024-12-08T00:50:42,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741855_1031 (size=70862) 2024-12-08T00:50:42,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741855_1031 (size=70862) 2024-12-08T00:50:42,955 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/1588230740/.tmp/ns/79a56ac68b524a27863e394cedbb96d7 is 43, key is default/ns:d/1733619027819/Put/seqid=0 2024-12-08T00:50:42,957 DEBUG [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/b7786dedf2ce8631623c86f93f6e1d74/.tmp/info/3ece07c6d79c4849a9e26e83203419d9 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/b7786dedf2ce8631623c86f93f6e1d74/info/3ece07c6d79c4849a9e26e83203419d9 2024-12-08T00:50:42,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741856_1032 (size=5153) 2024-12-08T00:50:42,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741856_1032 (size=5153) 2024-12-08T00:50:42,961 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/1588230740/.tmp/ns/79a56ac68b524a27863e394cedbb96d7 2024-12-08T00:50:42,964 INFO [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in b7786dedf2ce8631623c86f93f6e1d74/info of b7786dedf2ce8631623c86f93f6e1d74 into 3ece07c6d79c4849a9e26e83203419d9(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
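The HRegionFileSystem(442) "Committing ... .tmp/... as ..." lines above and below show the same two-step pattern for both flush and compaction outputs: the new HFile is written under the region's .tmp directory and only then renamed into the column-family directory, so readers never observe a partially written store file. A minimal sketch of that commit step (the class and method below are illustrative, not the real HRegionFileSystem API):

```java
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class CommitTmpFileSketch {
  /** Move a finished file from the region's .tmp area into <region>/<family>/<name>. */
  static Path commit(FileSystem fs, Path regionDir, String family, Path tmpFile) throws IOException {
    Path dest = new Path(new Path(regionDir, family), tmpFile.getName());
    if (!fs.rename(tmpFile, dest)) {  // metadata-only move on HDFS
      throw new IOException("Failed to commit " + tmpFile + " as " + dest);
    }
    return dest;
  }
}
```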
2024-12-08T00:50:42,964 DEBUG [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b7786dedf2ce8631623c86f93f6e1d74:
2024-12-08T00:50:42,964 INFO [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733619042222.b7786dedf2ce8631623c86f93f6e1d74., storeName=b7786dedf2ce8631623c86f93f6e1d74/info, priority=15, startTime=1733619042921; duration=0sec
2024-12-08T00:50:42,964 DEBUG [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-08T00:50:42,964 DEBUG [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7786dedf2ce8631623c86f93f6e1d74:info
2024-12-08T00:50:42,977 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/1588230740/.tmp/table/2589649b51d1409ab762cb58c5358655 is 65, key is TestLogRolling-testLogRolling/table:state/1733619028240/Put/seqid=0
2024-12-08T00:50:42,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741857_1033 (size=5340)
2024-12-08T00:50:42,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741857_1033 (size=5340)
2024-12-08T00:50:42,982 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/1588230740/.tmp/table/2589649b51d1409ab762cb58c5358655
2024-12-08T00:50:42,987 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/1588230740/.tmp/info/2cbc81ef9f0746c19db021681ee5b11b as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/1588230740/info/2cbc81ef9f0746c19db021681ee5b11b
2024-12-08T00:50:42,992 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/1588230740/info/2cbc81ef9f0746c19db021681ee5b11b, entries=30, sequenceid=17, filesize=9.7 K
2024-12-08T00:50:42,993 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/1588230740/.tmp/ns/79a56ac68b524a27863e394cedbb96d7 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/1588230740/ns/79a56ac68b524a27863e394cedbb96d7
2024-12-08T00:50:42,998 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/1588230740/ns/79a56ac68b524a27863e394cedbb96d7, entries=2, sequenceid=17, filesize=5.0 K
2024-12-08T00:50:42,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/1588230740/.tmp/table/2589649b51d1409ab762cb58c5358655 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/1588230740/table/2589649b51d1409ab762cb58c5358655
2024-12-08T00:50:43,004 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/1588230740/table/2589649b51d1409ab762cb58c5358655, entries=2, sequenceid=17, filesize=5.2 K
2024-12-08T00:50:43,005 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 104ms, sequenceid=17, compaction requested=false
2024-12-08T00:50:43,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740:
2024-12-08T00:50:43,352 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/aceb76fb70444855882bf0c327b5d6cf as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/aceb76fb70444855882bf0c327b5d6cf
2024-12-08T00:50:43,362 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c94c6b105330c7ab03f7d694b18f1b38/info of c94c6b105330c7ab03f7d694b18f1b38 into aceb76fb70444855882bf0c327b5d6cf(size=42.0 K), total size for store is 42.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-08T00:50:43,362 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c94c6b105330c7ab03f7d694b18f1b38:
2024-12-08T00:50:43,362 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38., storeName=c94c6b105330c7ab03f7d694b18f1b38/info, priority=13, startTime=1733619042894; duration=0sec
2024-12-08T00:50:43,362 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-08T00:50:43,362 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c94c6b105330c7ab03f7d694b18f1b38:info
2024-12-08T00:50:43,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-08T00:50:43,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-08T00:50:44,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45839 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:38846 deadline: 1733619054205, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227. is not online on 0f983e3e5be1,45839,1733619026732
2024-12-08T00:50:44,230 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227., hostname=0f983e3e5be1,45839,1733619026732, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227., hostname=0f983e3e5be1,45839,1733619026732, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227. is not online on 0f983e3e5be1,45839,1733619026732
    at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-08T00:50:44,230 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227., hostname=0f983e3e5be1,45839,1733619026732, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227.
is not online on 0f983e3e5be1,45839,1733619026732 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-08T00:50:44,230 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1733619027867.bcea31935b7ba636d16fda5ff1400227., hostname=0f983e3e5be1,45839,1733619026732, seqNum=2 from cache 2024-12-08T00:50:44,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:50:44,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:45,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:45,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:50:46,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:46,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:47,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:47,438 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,439 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,439 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,442 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,442 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,462 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:47,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,464 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,970 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T00:50:47,973 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,974 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,975 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,975 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,976 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,976 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,977 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,978 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,994 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,994 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,994 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,994 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,997 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,997 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,997 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:47,999 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:50:48,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:48,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:49,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:49,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:50,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:50,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:51,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:51,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:52,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:52,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:53,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:53,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:50:54,278 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38., hostname=0f983e3e5be1,45839,1733619026732, seqNum=131] 2024-12-08T00:50:54,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45839 {}] regionserver.HRegion(8855): Flush requested on c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:50:54,289 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c94c6b105330c7ab03f7d694b18f1b38 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T00:50:54,293 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/06aa4448d91a4a71a8ee09288162a954 is 1080, key is row0097/info:/1733619054279/Put/seqid=0 2024-12-08T00:50:54,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741858_1034 (size=12516) 2024-12-08T00:50:54,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741858_1034 (size=12516) 2024-12-08T00:50:54,298 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/06aa4448d91a4a71a8ee09288162a954 2024-12-08T00:50:54,304 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/06aa4448d91a4a71a8ee09288162a954 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/06aa4448d91a4a71a8ee09288162a954 2024-12-08T00:50:54,309 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/06aa4448d91a4a71a8ee09288162a954, entries=7, sequenceid=141, filesize=12.2 K 2024-12-08T00:50:54,310 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for c94c6b105330c7ab03f7d694b18f1b38 in 21ms, sequenceid=141, compaction requested=false 2024-12-08T00:50:54,310 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c94c6b105330c7ab03f7d694b18f1b38: 2024-12-08T00:50:54,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45839 {}] regionserver.HRegion(8855): Flush requested on c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:50:54,311 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c94c6b105330c7ab03f7d694b18f1b38 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-08T00:50:54,314 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/e9a2a292b11b47f2962e8b18f58f5228 is 1080, key is row0104/info:/1733619054290/Put/seqid=0 2024-12-08T00:50:54,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741859_1035 (size=16828) 2024-12-08T00:50:54,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741859_1035 (size=16828) 2024-12-08T00:50:54,322 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/e9a2a292b11b47f2962e8b18f58f5228 2024-12-08T00:50:54,328 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/e9a2a292b11b47f2962e8b18f58f5228 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/e9a2a292b11b47f2962e8b18f58f5228 2024-12-08T00:50:54,333 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/e9a2a292b11b47f2962e8b18f58f5228, entries=11, sequenceid=155, filesize=16.4 K 2024-12-08T00:50:54,334 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=11.56 KB/11836 for c94c6b105330c7ab03f7d694b18f1b38 in 23ms, sequenceid=155, compaction requested=true 2024-12-08T00:50:54,334 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c94c6b105330c7ab03f7d694b18f1b38: 2024-12-08T00:50:54,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c94c6b105330c7ab03f7d694b18f1b38:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:50:54,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:50:54,334 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:50:54,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45839 {}] regionserver.HRegion(8855): Flush requested on c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:50:54,335 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c94c6b105330c7ab03f7d694b18f1b38 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-08T00:50:54,335 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 72328 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:50:54,335 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] 
regionserver.HStore(1541): c94c6b105330c7ab03f7d694b18f1b38/info is initiating minor compaction (all files) 2024-12-08T00:50:54,335 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c94c6b105330c7ab03f7d694b18f1b38/info in TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38. 2024-12-08T00:50:54,336 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/aceb76fb70444855882bf0c327b5d6cf, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/06aa4448d91a4a71a8ee09288162a954, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/e9a2a292b11b47f2962e8b18f58f5228] into tmpdir=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp, totalSize=70.6 K 2024-12-08T00:50:54,336 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting aceb76fb70444855882bf0c327b5d6cf, keycount=35, bloomtype=ROW, size=42.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1733619040118 2024-12-08T00:50:54,336 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting 06aa4448d91a4a71a8ee09288162a954, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1733619054279 2024-12-08T00:50:54,336 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting e9a2a292b11b47f2962e8b18f58f5228, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733619054290 2024-12-08T00:50:54,338 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/9f88d098dcc14c2da4979192bc2f80fa is 1080, key is row0115/info:/1733619054312/Put/seqid=0 2024-12-08T00:50:54,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741860_1036 (size=17906) 2024-12-08T00:50:54,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741860_1036 (size=17906) 2024-12-08T00:50:54,343 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/9f88d098dcc14c2da4979192bc2f80fa 2024-12-08T00:50:54,347 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c94c6b105330c7ab03f7d694b18f1b38#info#compaction#75 average throughput is 54.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:50:54,347 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/e0987733de87462cb7f8459456c1e159 is 1080, key is row0062/info:/1733619040118/Put/seqid=0 2024-12-08T00:50:54,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/9f88d098dcc14c2da4979192bc2f80fa as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/9f88d098dcc14c2da4979192bc2f80fa 2024-12-08T00:50:54,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741861_1037 (size=62558) 2024-12-08T00:50:54,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741861_1037 (size=62558) 2024-12-08T00:50:54,354 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/9f88d098dcc14c2da4979192bc2f80fa, entries=12, sequenceid=170, filesize=17.5 K 2024-12-08T00:50:54,355 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=2.10 KB/2152 for c94c6b105330c7ab03f7d694b18f1b38 in 21ms, sequenceid=170, compaction requested=false 2024-12-08T00:50:54,355 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c94c6b105330c7ab03f7d694b18f1b38: 2024-12-08T00:50:54,357 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/e0987733de87462cb7f8459456c1e159 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/e0987733de87462cb7f8459456c1e159 2024-12-08T00:50:54,362 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c94c6b105330c7ab03f7d694b18f1b38/info of c94c6b105330c7ab03f7d694b18f1b38 into e0987733de87462cb7f8459456c1e159(size=61.1 K), total size for store is 78.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
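(The recurring "Failed invocation ... InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" WARN entries in this run come from the lease-recovery probe calling isFileClosed on the filesystem through reflection: once the mini-cluster's DFS client has been shut down, every reflective probe fails the same way and the Close-WAL-Writer thread keeps retrying, so the identical trace reappears roughly once per second. The following is a minimal, self-contained sketch of that invocation shape only, assembled from what the stack trace itself shows; RecoveryProbe, FakeFs, probeClosed and the String-typed path argument are illustrative stand-ins and not the actual RecoverLeaseFSUtils or DistributedFileSystem code.)

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    // Illustrative stand-in for the reflective isFileClosed probe seen in the
    // traces above; not the real HBase/HDFS implementation.
    public final class RecoveryProbe {

        // Mimics a filesystem whose client can be closed underneath the caller.
        public static final class FakeFs {
            private volatile boolean clientOpen = true;

            public void close() { clientOpen = false; }

            // Stands in for DistributedFileSystem.isFileClosed(Path).
            public boolean isFileClosed(String path) throws IOException {
                if (!clientOpen) {
                    // Same failure mode the log shows from DFSClient.checkOpen().
                    throw new IOException("Filesystem closed");
                }
                return false; // pretend the lease has not been recovered yet
            }
        }

        // Reflective call: a checked exception thrown by the target method
        // surfaces as InvocationTargetException, which is what the WARN
        // "Failed invocation for <path>" lines are reporting before a retry.
        public static boolean probeClosed(Object fs, String path) {
            try {
                Method m = fs.getClass().getMethod("isFileClosed", String.class);
                return (Boolean) m.invoke(fs, path);
            } catch (NoSuchMethodException e) {
                return false; // this filesystem flavour has no isFileClosed
            } catch (IllegalAccessException | InvocationTargetException e) {
                System.out.println("Failed invocation for " + path + ": " + e.getCause());
                return false; // caller treats this as "not closed yet" and retries
            }
        }

        public static void main(String[] args) {
            FakeFs fs = new FakeFs();
            System.out.println(probeClosed(fs, "/WALs/example")); // false: lease still open
            fs.close();
            System.out.println(probeClosed(fs, "/WALs/example")); // logs wrapped "Filesystem closed"
        }
    }

(Under these assumptions the retry loop never succeeds after the client is closed, which matches the behaviour visible here: the same two WAL paths are probed repeatedly until the writer-close task is abandoned at teardown.)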
2024-12-08T00:50:54,362 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c94c6b105330c7ab03f7d694b18f1b38: 2024-12-08T00:50:54,362 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38., storeName=c94c6b105330c7ab03f7d694b18f1b38/info, priority=13, startTime=1733619054334; duration=0sec 2024-12-08T00:50:54,362 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:50:54,362 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c94c6b105330c7ab03f7d694b18f1b38:info 2024-12-08T00:50:54,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:50:54,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:55,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:55,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:50:56,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45839 {}] regionserver.HRegion(8855): Flush requested on c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:50:56,352 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c94c6b105330c7ab03f7d694b18f1b38 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T00:50:56,357 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/003aa50b8f7447c686cdf80c3c6b47b9 is 1080, key is row0127/info:/1733619054336/Put/seqid=0 2024-12-08T00:50:56,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741862_1038 (size=12516) 2024-12-08T00:50:56,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741862_1038 (size=12516) 2024-12-08T00:50:56,363 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=181 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/003aa50b8f7447c686cdf80c3c6b47b9 2024-12-08T00:50:56,370 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/003aa50b8f7447c686cdf80c3c6b47b9 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/003aa50b8f7447c686cdf80c3c6b47b9 2024-12-08T00:50:56,376 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/003aa50b8f7447c686cdf80c3c6b47b9, entries=7, sequenceid=181, filesize=12.2 K 2024-12-08T00:50:56,377 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for c94c6b105330c7ab03f7d694b18f1b38 in 25ms, sequenceid=181, compaction requested=true 2024-12-08T00:50:56,377 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c94c6b105330c7ab03f7d694b18f1b38: 2024-12-08T00:50:56,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c94c6b105330c7ab03f7d694b18f1b38:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:50:56,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:50:56,377 DEBUG [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:50:56,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45839 {}] regionserver.HRegion(8855): Flush requested on c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:50:56,378 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing c94c6b105330c7ab03f7d694b18f1b38 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-08T00:50:56,378 DEBUG [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92980 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:50:56,378 DEBUG [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] regionserver.HStore(1541): c94c6b105330c7ab03f7d694b18f1b38/info is initiating minor compaction (all files) 2024-12-08T00:50:56,378 INFO [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c94c6b105330c7ab03f7d694b18f1b38/info in TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38. 2024-12-08T00:50:56,378 INFO [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/e0987733de87462cb7f8459456c1e159, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/9f88d098dcc14c2da4979192bc2f80fa, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/003aa50b8f7447c686cdf80c3c6b47b9] into tmpdir=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp, totalSize=90.8 K 2024-12-08T00:50:56,379 DEBUG [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] compactions.Compactor(225): Compacting e0987733de87462cb7f8459456c1e159, keycount=53, bloomtype=ROW, size=61.1 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733619040118 2024-12-08T00:50:56,379 DEBUG [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] compactions.Compactor(225): Compacting 9f88d098dcc14c2da4979192bc2f80fa, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733619054312 2024-12-08T00:50:56,380 DEBUG [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] compactions.Compactor(225): Compacting 003aa50b8f7447c686cdf80c3c6b47b9, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1733619054336 2024-12-08T00:50:56,382 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/7357b25d0c6e4edcadf4d8978443276b is 1080, key is row0134/info:/1733619056353/Put/seqid=0 2024-12-08T00:50:56,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741863_1039 (size=17906) 2024-12-08T00:50:56,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741863_1039 (size=17906) 2024-12-08T00:50:56,388 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=196 (bloomFilter=true), 
to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/7357b25d0c6e4edcadf4d8978443276b 2024-12-08T00:50:56,392 INFO [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c94c6b105330c7ab03f7d694b18f1b38#info#compaction#78 average throughput is 24.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:50:56,392 DEBUG [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/bacc7ac1c45745039f6c902e33d36519 is 1080, key is row0062/info:/1733619040118/Put/seqid=0 2024-12-08T00:50:56,393 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/7357b25d0c6e4edcadf4d8978443276b as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/7357b25d0c6e4edcadf4d8978443276b 2024-12-08T00:50:56,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741864_1040 (size=83215) 2024-12-08T00:50:56,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741864_1040 (size=83215) 2024-12-08T00:50:56,399 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/7357b25d0c6e4edcadf4d8978443276b, entries=12, sequenceid=196, filesize=17.5 K 2024-12-08T00:50:56,400 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for c94c6b105330c7ab03f7d694b18f1b38 in 22ms, sequenceid=196, compaction requested=false 2024-12-08T00:50:56,400 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c94c6b105330c7ab03f7d694b18f1b38: 2024-12-08T00:50:56,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45839 {}] regionserver.HRegion(8855): Flush requested on c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:50:56,400 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c94c6b105330c7ab03f7d694b18f1b38 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-08T00:50:56,404 DEBUG [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/bacc7ac1c45745039f6c902e33d36519 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/bacc7ac1c45745039f6c902e33d36519 2024-12-08T00:50:56,406 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/b9d824d97dbc4857ac235b7679b0b1ea is 1080, key is row0146/info:/1733619056379/Put/seqid=0 2024-12-08T00:50:56,410 INFO [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c94c6b105330c7ab03f7d694b18f1b38/info of c94c6b105330c7ab03f7d694b18f1b38 into bacc7ac1c45745039f6c902e33d36519(size=81.3 K), total size for store is 98.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:50:56,410 DEBUG [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c94c6b105330c7ab03f7d694b18f1b38: 2024-12-08T00:50:56,410 INFO [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38., storeName=c94c6b105330c7ab03f7d694b18f1b38/info, priority=13, startTime=1733619056377; duration=0sec 2024-12-08T00:50:56,410 DEBUG [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:50:56,410 DEBUG [RS:0;0f983e3e5be1:45839-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c94c6b105330c7ab03f7d694b18f1b38:info 2024-12-08T00:50:56,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741865_1041 (size=16828) 2024-12-08T00:50:56,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741865_1041 (size=16828) 2024-12-08T00:50:56,411 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/b9d824d97dbc4857ac235b7679b0b1ea 2024-12-08T00:50:56,416 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/b9d824d97dbc4857ac235b7679b0b1ea as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/b9d824d97dbc4857ac235b7679b0b1ea 2024-12-08T00:50:56,420 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/b9d824d97dbc4857ac235b7679b0b1ea, entries=11, sequenceid=210, filesize=16.4 K 2024-12-08T00:50:56,421 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=4.20 KB/4304 for c94c6b105330c7ab03f7d694b18f1b38 in 21ms, sequenceid=210, compaction requested=true 2024-12-08T00:50:56,421 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c94c6b105330c7ab03f7d694b18f1b38: 2024-12-08T00:50:56,421 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c94c6b105330c7ab03f7d694b18f1b38:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:50:56,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:50:56,421 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:50:56,422 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 117949 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:50:56,422 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1541): c94c6b105330c7ab03f7d694b18f1b38/info is initiating minor compaction (all files) 2024-12-08T00:50:56,422 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c94c6b105330c7ab03f7d694b18f1b38/info in TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38. 2024-12-08T00:50:56,422 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/bacc7ac1c45745039f6c902e33d36519, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/7357b25d0c6e4edcadf4d8978443276b, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/b9d824d97dbc4857ac235b7679b0b1ea] into tmpdir=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp, totalSize=115.2 K 2024-12-08T00:50:56,423 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting bacc7ac1c45745039f6c902e33d36519, keycount=72, bloomtype=ROW, size=81.3 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1733619040118 2024-12-08T00:50:56,423 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7357b25d0c6e4edcadf4d8978443276b, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733619056353 2024-12-08T00:50:56,423 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting b9d824d97dbc4857ac235b7679b0b1ea, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733619056379 2024-12-08T00:50:56,435 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c94c6b105330c7ab03f7d694b18f1b38#info#compaction#80 average throughput is 48.74 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:50:56,435 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/57ea8e1f0b7d4a6188af1d56c2e84449 is 1080, key is row0062/info:/1733619040118/Put/seqid=0 2024-12-08T00:50:56,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741866_1042 (size=108119) 2024-12-08T00:50:56,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741866_1042 (size=108119) 2024-12-08T00:50:56,445 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/57ea8e1f0b7d4a6188af1d56c2e84449 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/57ea8e1f0b7d4a6188af1d56c2e84449 2024-12-08T00:50:56,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:56,451 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c94c6b105330c7ab03f7d694b18f1b38/info of c94c6b105330c7ab03f7d694b18f1b38 into 57ea8e1f0b7d4a6188af1d56c2e84449(size=105.6 K), total size for store is 105.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:50:56,451 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c94c6b105330c7ab03f7d694b18f1b38: 2024-12-08T00:50:56,451 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38., storeName=c94c6b105330c7ab03f7d694b18f1b38/info, priority=13, startTime=1733619056421; duration=0sec 2024-12-08T00:50:56,451 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:50:56,452 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c94c6b105330c7ab03f7d694b18f1b38:info 2024-12-08T00:50:56,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:56,574 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-08T00:50:57,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:57,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:58,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:50:58,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45839 {}] regionserver.HRegion(8855): Flush requested on c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:50:58,469 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c94c6b105330c7ab03f7d694b18f1b38 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T00:50:58,471 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:50:58,476 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/a490992dfd754b8f8a2a4b3146ac59c7 is 1080, key is row0157/info:/1733619056402/Put/seqid=0 2024-12-08T00:50:58,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741867_1043 (size=12516) 2024-12-08T00:50:58,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741867_1043 (size=12516) 2024-12-08T00:50:58,481 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=222 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/a490992dfd754b8f8a2a4b3146ac59c7 2024-12-08T00:50:58,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/a490992dfd754b8f8a2a4b3146ac59c7 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/a490992dfd754b8f8a2a4b3146ac59c7 2024-12-08T00:50:58,491 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/a490992dfd754b8f8a2a4b3146ac59c7, entries=7, sequenceid=222, filesize=12.2 K 2024-12-08T00:50:58,492 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for c94c6b105330c7ab03f7d694b18f1b38 in 23ms, sequenceid=222, compaction requested=false 2024-12-08T00:50:58,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c94c6b105330c7ab03f7d694b18f1b38: 2024-12-08T00:50:58,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45839 {}] regionserver.HRegion(8855): Flush requested on c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:50:58,494 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c94c6b105330c7ab03f7d694b18f1b38 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-08T00:50:58,498 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/09179db450f84b94a1ed91b5f98d0fe1 is 1080, key is row0164/info:/1733619058471/Put/seqid=0 2024-12-08T00:50:58,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741868_1044 (size=17906) 2024-12-08T00:50:58,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741868_1044 (size=17906) 2024-12-08T00:50:58,504 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at 
sequenceid=237 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/09179db450f84b94a1ed91b5f98d0fe1 2024-12-08T00:50:58,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/09179db450f84b94a1ed91b5f98d0fe1 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/09179db450f84b94a1ed91b5f98d0fe1 2024-12-08T00:50:58,514 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/09179db450f84b94a1ed91b5f98d0fe1, entries=12, sequenceid=237, filesize=17.5 K 2024-12-08T00:50:58,515 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=11.56 KB/11836 for c94c6b105330c7ab03f7d694b18f1b38 in 21ms, sequenceid=237, compaction requested=true 2024-12-08T00:50:58,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c94c6b105330c7ab03f7d694b18f1b38: 2024-12-08T00:50:58,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c94c6b105330c7ab03f7d694b18f1b38:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:50:58,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:50:58,515 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:50:58,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45839 {}] regionserver.HRegion(8855): Flush requested on c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:50:58,515 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c94c6b105330c7ab03f7d694b18f1b38 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-08T00:50:58,516 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 138541 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:50:58,516 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1541): c94c6b105330c7ab03f7d694b18f1b38/info is initiating minor compaction (all files) 2024-12-08T00:50:58,516 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c94c6b105330c7ab03f7d694b18f1b38/info in TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38. 
2024-12-08T00:50:58,516 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/57ea8e1f0b7d4a6188af1d56c2e84449, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/a490992dfd754b8f8a2a4b3146ac59c7, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/09179db450f84b94a1ed91b5f98d0fe1] into tmpdir=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp, totalSize=135.3 K 2024-12-08T00:50:58,517 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting 57ea8e1f0b7d4a6188af1d56c2e84449, keycount=95, bloomtype=ROW, size=105.6 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733619040118 2024-12-08T00:50:58,517 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting a490992dfd754b8f8a2a4b3146ac59c7, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1733619056402 2024-12-08T00:50:58,517 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting 09179db450f84b94a1ed91b5f98d0fe1, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733619058471 2024-12-08T00:50:58,519 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/3af92e94a83240088860383010391fa7 is 1080, key is row0176/info:/1733619058494/Put/seqid=0 2024-12-08T00:50:58,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741869_1045 (size=17906) 2024-12-08T00:50:58,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741869_1045 (size=17906) 2024-12-08T00:50:58,525 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/3af92e94a83240088860383010391fa7 2024-12-08T00:50:58,531 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c94c6b105330c7ab03f7d694b18f1b38#info#compaction#84 average throughput is 116.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:50:58,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/3af92e94a83240088860383010391fa7 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/3af92e94a83240088860383010391fa7 2024-12-08T00:50:58,531 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/6cf6e54e9f0c4ac39beb035f7f199279 is 1080, key is row0062/info:/1733619040118/Put/seqid=0 2024-12-08T00:50:58,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741870_1046 (size=128835) 2024-12-08T00:50:58,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741870_1046 (size=128835) 2024-12-08T00:50:58,537 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/3af92e94a83240088860383010391fa7, entries=12, sequenceid=252, filesize=17.5 K 2024-12-08T00:50:58,538 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=5.25 KB/5380 for c94c6b105330c7ab03f7d694b18f1b38 in 23ms, sequenceid=252, compaction requested=false 2024-12-08T00:50:58,538 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c94c6b105330c7ab03f7d694b18f1b38: 2024-12-08T00:50:58,539 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/6cf6e54e9f0c4ac39beb035f7f199279 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/6cf6e54e9f0c4ac39beb035f7f199279 2024-12-08T00:50:58,545 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c94c6b105330c7ab03f7d694b18f1b38/info of c94c6b105330c7ab03f7d694b18f1b38 into 6cf6e54e9f0c4ac39beb035f7f199279(size=125.8 K), total size for store is 143.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:50:58,545 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c94c6b105330c7ab03f7d694b18f1b38: 2024-12-08T00:50:58,545 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38., storeName=c94c6b105330c7ab03f7d694b18f1b38/info, priority=13, startTime=1733619058515; duration=0sec 2024-12-08T00:50:58,545 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:50:58,545 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c94c6b105330c7ab03f7d694b18f1b38:info 2024-12-08T00:50:59,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:50:59,472 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:51:00,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:51:00,472 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:51:00,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45839 {}] regionserver.HRegion(8855): Flush requested on c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:51:00,538 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c94c6b105330c7ab03f7d694b18f1b38 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T00:51:00,542 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/6ca78fbcdce544e199e255e5155fd481 is 1080, key is row0188/info:/1733619058516/Put/seqid=0 2024-12-08T00:51:00,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741871_1047 (size=12521) 2024-12-08T00:51:00,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741871_1047 (size=12521) 2024-12-08T00:51:00,551 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/6ca78fbcdce544e199e255e5155fd481 2024-12-08T00:51:00,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/6ca78fbcdce544e199e255e5155fd481 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/6ca78fbcdce544e199e255e5155fd481 2024-12-08T00:51:00,562 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/6ca78fbcdce544e199e255e5155fd481, entries=7, sequenceid=263, filesize=12.2 K 2024-12-08T00:51:00,563 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for c94c6b105330c7ab03f7d694b18f1b38 in 24ms, sequenceid=263, compaction requested=true 2024-12-08T00:51:00,563 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c94c6b105330c7ab03f7d694b18f1b38: 2024-12-08T00:51:00,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c94c6b105330c7ab03f7d694b18f1b38:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:51:00,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:51:00,563 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:51:00,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45839 {}] regionserver.HRegion(8855): Flush requested on c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:51:00,564 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing c94c6b105330c7ab03f7d694b18f1b38 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-08T00:51:00,564 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 159262 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:51:00,564 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1541): c94c6b105330c7ab03f7d694b18f1b38/info is initiating minor compaction (all files) 2024-12-08T00:51:00,564 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c94c6b105330c7ab03f7d694b18f1b38/info in TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38. 2024-12-08T00:51:00,564 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/6cf6e54e9f0c4ac39beb035f7f199279, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/3af92e94a83240088860383010391fa7, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/6ca78fbcdce544e199e255e5155fd481] into tmpdir=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp, totalSize=155.5 K 2024-12-08T00:51:00,565 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6cf6e54e9f0c4ac39beb035f7f199279, keycount=114, bloomtype=ROW, size=125.8 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733619040118 2024-12-08T00:51:00,565 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3af92e94a83240088860383010391fa7, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733619058494 2024-12-08T00:51:00,566 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6ca78fbcdce544e199e255e5155fd481, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1733619058516 2024-12-08T00:51:00,567 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/24bb797d8266425db226a3b6eb8389fd is 1080, key is row0195/info:/1733619060540/Put/seqid=0 2024-12-08T00:51:00,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741872_1048 (size=20092) 2024-12-08T00:51:00,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741872_1048 (size=20092) 2024-12-08T00:51:00,572 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=280 (bloomFilter=true), 
to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/24bb797d8266425db226a3b6eb8389fd 2024-12-08T00:51:00,578 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c94c6b105330c7ab03f7d694b18f1b38#info#compaction#87 average throughput is 45.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:51:00,578 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/385dc132ee304aeb9a209acc12c7437e is 1080, key is row0062/info:/1733619040118/Put/seqid=0 2024-12-08T00:51:00,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/24bb797d8266425db226a3b6eb8389fd as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/24bb797d8266425db226a3b6eb8389fd 2024-12-08T00:51:00,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741873_1049 (size=149497) 2024-12-08T00:51:00,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741873_1049 (size=149497) 2024-12-08T00:51:00,584 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/24bb797d8266425db226a3b6eb8389fd, entries=14, sequenceid=280, filesize=19.6 K 2024-12-08T00:51:00,585 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=9.46 KB/9684 for c94c6b105330c7ab03f7d694b18f1b38 in 21ms, sequenceid=280, compaction requested=false 2024-12-08T00:51:00,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c94c6b105330c7ab03f7d694b18f1b38: 2024-12-08T00:51:00,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45839 {}] regionserver.HRegion(8855): Flush requested on c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:51:00,587 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c94c6b105330c7ab03f7d694b18f1b38 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-08T00:51:00,587 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/385dc132ee304aeb9a209acc12c7437e as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/385dc132ee304aeb9a209acc12c7437e 2024-12-08T00:51:00,590 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/533251601552462597d5edba43a1b33a is 1080, key is row0209/info:/1733619060565/Put/seqid=0 2024-12-08T00:51:00,592 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c94c6b105330c7ab03f7d694b18f1b38/info of c94c6b105330c7ab03f7d694b18f1b38 into 385dc132ee304aeb9a209acc12c7437e(size=146.0 K), total size for store is 165.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:51:00,592 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c94c6b105330c7ab03f7d694b18f1b38: 2024-12-08T00:51:00,592 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38., storeName=c94c6b105330c7ab03f7d694b18f1b38/info, priority=13, startTime=1733619060563; duration=0sec 2024-12-08T00:51:00,593 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:51:00,593 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c94c6b105330c7ab03f7d694b18f1b38:info 2024-12-08T00:51:00,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741874_1050 (size=16839) 2024-12-08T00:51:00,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741874_1050 (size=16839) 2024-12-08T00:51:00,597 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/533251601552462597d5edba43a1b33a 2024-12-08T00:51:00,602 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/533251601552462597d5edba43a1b33a as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/533251601552462597d5edba43a1b33a 2024-12-08T00:51:00,606 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/533251601552462597d5edba43a1b33a, entries=11, sequenceid=294, filesize=16.4 K 2024-12-08T00:51:00,607 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=5.25 KB/5380 for c94c6b105330c7ab03f7d694b18f1b38 in 20ms, sequenceid=294, compaction requested=true 2024-12-08T00:51:00,607 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c94c6b105330c7ab03f7d694b18f1b38: 2024-12-08T00:51:00,607 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c94c6b105330c7ab03f7d694b18f1b38:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:51:00,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:51:00,608 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:51:00,608 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 186428 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:51:00,608 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1541): c94c6b105330c7ab03f7d694b18f1b38/info is initiating minor compaction (all files) 2024-12-08T00:51:00,609 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c94c6b105330c7ab03f7d694b18f1b38/info in TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38. 2024-12-08T00:51:00,609 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/385dc132ee304aeb9a209acc12c7437e, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/24bb797d8266425db226a3b6eb8389fd, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/533251601552462597d5edba43a1b33a] into tmpdir=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp, totalSize=182.1 K 2024-12-08T00:51:00,609 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting 385dc132ee304aeb9a209acc12c7437e, keycount=133, bloomtype=ROW, size=146.0 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1733619040118 2024-12-08T00:51:00,609 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting 24bb797d8266425db226a3b6eb8389fd, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1733619060540 2024-12-08T00:51:00,610 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting 533251601552462597d5edba43a1b33a, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1733619060565 2024-12-08T00:51:00,620 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c94c6b105330c7ab03f7d694b18f1b38#info#compaction#89 average throughput is 54.04 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:51:00,621 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/8534f182781a4f129df7cf677e75916a is 1080, key is row0062/info:/1733619040118/Put/seqid=0 2024-12-08T00:51:00,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741875_1051 (size=176582) 2024-12-08T00:51:00,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741875_1051 (size=176582) 2024-12-08T00:51:00,628 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/8534f182781a4f129df7cf677e75916a as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/8534f182781a4f129df7cf677e75916a 2024-12-08T00:51:00,633 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c94c6b105330c7ab03f7d694b18f1b38/info of c94c6b105330c7ab03f7d694b18f1b38 into 8534f182781a4f129df7cf677e75916a(size=172.4 K), total size for store is 172.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:51:00,634 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c94c6b105330c7ab03f7d694b18f1b38: 2024-12-08T00:51:00,634 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38., storeName=c94c6b105330c7ab03f7d694b18f1b38/info, priority=13, startTime=1733619060607; duration=0sec 2024-12-08T00:51:00,634 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:51:00,634 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c94c6b105330c7ab03f7d694b18f1b38:info 2024-12-08T00:51:01,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:51:01,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:51:02,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:51:02,474 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:51:02,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45839 {}] regionserver.HRegion(8855): Flush requested on c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:51:02,605 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c94c6b105330c7ab03f7d694b18f1b38 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T00:51:02,610 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/ceacf7e665bc462499552f3038537a13 is 1080, key is row0220/info:/1733619060588/Put/seqid=0 2024-12-08T00:51:02,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741876_1052 (size=12523) 2024-12-08T00:51:02,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741876_1052 (size=12523) 2024-12-08T00:51:02,616 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=306 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/ceacf7e665bc462499552f3038537a13 2024-12-08T00:51:02,621 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/ceacf7e665bc462499552f3038537a13 as 
hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/ceacf7e665bc462499552f3038537a13 2024-12-08T00:51:02,625 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/ceacf7e665bc462499552f3038537a13, entries=7, sequenceid=306, filesize=12.2 K 2024-12-08T00:51:02,626 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for c94c6b105330c7ab03f7d694b18f1b38 in 21ms, sequenceid=306, compaction requested=false 2024-12-08T00:51:02,626 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c94c6b105330c7ab03f7d694b18f1b38: 2024-12-08T00:51:02,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45839 {}] regionserver.HRegion(8855): Flush requested on c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:51:02,628 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c94c6b105330c7ab03f7d694b18f1b38 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-08T00:51:02,631 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/7fb4425da6414d668c8137d2c3b0bc96 is 1080, key is row0227/info:/1733619062606/Put/seqid=0 2024-12-08T00:51:02,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741877_1053 (size=17918) 2024-12-08T00:51:02,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741877_1053 (size=17918) 2024-12-08T00:51:02,637 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/7fb4425da6414d668c8137d2c3b0bc96 2024-12-08T00:51:02,642 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/7fb4425da6414d668c8137d2c3b0bc96 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/7fb4425da6414d668c8137d2c3b0bc96 2024-12-08T00:51:02,648 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/7fb4425da6414d668c8137d2c3b0bc96, entries=12, sequenceid=321, filesize=17.5 K 2024-12-08T00:51:02,648 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=11.56 KB/11836 for c94c6b105330c7ab03f7d694b18f1b38 in 20ms, sequenceid=321, compaction requested=true 2024-12-08T00:51:02,649 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for c94c6b105330c7ab03f7d694b18f1b38: 2024-12-08T00:51:02,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c94c6b105330c7ab03f7d694b18f1b38:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:51:02,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:51:02,649 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:51:02,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45839 {}] regionserver.HRegion(8855): Flush requested on c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:51:02,649 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c94c6b105330c7ab03f7d694b18f1b38 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-08T00:51:02,650 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 207023 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:51:02,650 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1541): c94c6b105330c7ab03f7d694b18f1b38/info is initiating minor compaction (all files) 2024-12-08T00:51:02,650 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c94c6b105330c7ab03f7d694b18f1b38/info in TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38. 2024-12-08T00:51:02,650 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/8534f182781a4f129df7cf677e75916a, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/ceacf7e665bc462499552f3038537a13, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/7fb4425da6414d668c8137d2c3b0bc96] into tmpdir=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp, totalSize=202.2 K 2024-12-08T00:51:02,651 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8534f182781a4f129df7cf677e75916a, keycount=158, bloomtype=ROW, size=172.4 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1733619040118 2024-12-08T00:51:02,651 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting ceacf7e665bc462499552f3038537a13, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1733619060588 2024-12-08T00:51:02,651 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7fb4425da6414d668c8137d2c3b0bc96, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1733619062606 2024-12-08T00:51:02,652 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/72032b6e652b4204ba6dc89676ddc003 is 1080, key is row0239/info:/1733619062629/Put/seqid=0 2024-12-08T00:51:02,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741878_1054 (size=17918) 2024-12-08T00:51:02,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741878_1054 (size=17918) 2024-12-08T00:51:02,657 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/72032b6e652b4204ba6dc89676ddc003 2024-12-08T00:51:02,662 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/72032b6e652b4204ba6dc89676ddc003 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/72032b6e652b4204ba6dc89676ddc003 2024-12-08T00:51:02,664 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c94c6b105330c7ab03f7d694b18f1b38#info#compaction#93 average throughput is 45.41 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:51:02,665 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/506439298cd14207a54fe6b75bc0a1c2 is 1080, key is row0062/info:/1733619040118/Put/seqid=0 2024-12-08T00:51:02,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741879_1055 (size=197189) 2024-12-08T00:51:02,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741879_1055 (size=197189) 2024-12-08T00:51:02,667 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/72032b6e652b4204ba6dc89676ddc003, entries=12, sequenceid=336, filesize=17.5 K 2024-12-08T00:51:02,668 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=6.30 KB/6456 for c94c6b105330c7ab03f7d694b18f1b38 in 19ms, sequenceid=336, compaction requested=false 2024-12-08T00:51:02,668 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c94c6b105330c7ab03f7d694b18f1b38: 2024-12-08T00:51:02,672 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/506439298cd14207a54fe6b75bc0a1c2 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/506439298cd14207a54fe6b75bc0a1c2 2024-12-08T00:51:02,677 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c94c6b105330c7ab03f7d694b18f1b38/info of c94c6b105330c7ab03f7d694b18f1b38 into 506439298cd14207a54fe6b75bc0a1c2(size=192.6 K), total size for store is 210.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:51:02,677 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c94c6b105330c7ab03f7d694b18f1b38: 2024-12-08T00:51:02,677 INFO [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38., storeName=c94c6b105330c7ab03f7d694b18f1b38/info, priority=13, startTime=1733619062649; duration=0sec 2024-12-08T00:51:02,677 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:51:02,677 DEBUG [RS:0;0f983e3e5be1:45839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c94c6b105330c7ab03f7d694b18f1b38:info 2024-12-08T00:51:03,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:51:03,475 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:51:04,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:51:04,476 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:51:04,662 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-12-08T00:51:04,663 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C45839%2C1733619026732.1733619064662 2024-12-08T00:51:04,673 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:04,673 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:04,674 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:04,674 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:04,674 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:04,674 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/WALs/0f983e3e5be1,45839,1733619026732/0f983e3e5be1%2C45839%2C1733619026732.1733619027242 with entries=320, filesize=311.00 KB; new WAL /user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/WALs/0f983e3e5be1,45839,1733619026732/0f983e3e5be1%2C45839%2C1733619026732.1733619064662 2024-12-08T00:51:04,676 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46357:46357),(127.0.0.1/127.0.0.1:34617:34617)] 2024-12-08T00:51:04,676 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/WALs/0f983e3e5be1,45839,1733619026732/0f983e3e5be1%2C45839%2C1733619026732.1733619027242 is not closed yet, will try archiving it next time 2024-12-08T00:51:04,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741833_1009 (size=318475) 2024-12-08T00:51:04,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741833_1009 (size=318475) 2024-12-08T00:51:04,682 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for b7786dedf2ce8631623c86f93f6e1d74: 2024-12-08T00:51:04,682 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing c94c6b105330c7ab03f7d694b18f1b38 1/1 column families, dataSize=6.30 KB heapSize=7 KB 2024-12-08T00:51:04,687 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/4c24c0ea27584db88245aae80e7333e0 is 1080, key is row0251/info:/1733619062650/Put/seqid=0 2024-12-08T00:51:04,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741881_1057 (size=11436) 2024-12-08T00:51:04,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741881_1057 (size=11436) 2024-12-08T00:51:04,692 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=346 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/4c24c0ea27584db88245aae80e7333e0 2024-12-08T00:51:04,696 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/.tmp/info/4c24c0ea27584db88245aae80e7333e0 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/4c24c0ea27584db88245aae80e7333e0 2024-12-08T00:51:04,701 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/4c24c0ea27584db88245aae80e7333e0, entries=6, sequenceid=346, filesize=11.2 K 2024-12-08T00:51:04,702 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~6.30 KB/6456, heapSize ~6.98 KB/7152, currentSize=0 B/0 for c94c6b105330c7ab03f7d694b18f1b38 in 20ms, sequenceid=346, compaction requested=true 2024-12-08T00:51:04,702 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for c94c6b105330c7ab03f7d694b18f1b38: 2024-12-08T00:51:04,702 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-12-08T00:51:04,706 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/1588230740/.tmp/info/6de44bb15a58446ead5d3095560e7359 is 186, key is TestLogRolling-testLogRolling,,1733619042222.b7786dedf2ce8631623c86f93f6e1d74./info:regioninfo/1733619042925/Put/seqid=0 2024-12-08T00:51:04,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741882_1058 (size=6153) 2024-12-08T00:51:04,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741882_1058 (size=6153) 2024-12-08T00:51:04,711 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/1588230740/.tmp/info/6de44bb15a58446ead5d3095560e7359 2024-12-08T00:51:04,715 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/1588230740/.tmp/info/6de44bb15a58446ead5d3095560e7359 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/1588230740/info/6de44bb15a58446ead5d3095560e7359 2024-12-08T00:51:04,719 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/1588230740/info/6de44bb15a58446ead5d3095560e7359, entries=5, sequenceid=21, filesize=6.0 K 2024-12-08T00:51:04,720 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 17ms, sequenceid=21, compaction requested=false 2024-12-08T00:51:04,720 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-08T00:51:04,720 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 
0f983e3e5be1%2C45839%2C1733619026732.1733619064720 2024-12-08T00:51:04,724 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:04,724 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:04,724 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:04,724 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:04,724 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:04,724 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/WALs/0f983e3e5be1,45839,1733619026732/0f983e3e5be1%2C45839%2C1733619026732.1733619064662 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/WALs/0f983e3e5be1,45839,1733619026732/0f983e3e5be1%2C45839%2C1733619026732.1733619064720 2024-12-08T00:51:04,725 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34617:34617),(127.0.0.1/127.0.0.1:46357:46357)] 2024-12-08T00:51:04,725 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/WALs/0f983e3e5be1,45839,1733619026732/0f983e3e5be1%2C45839%2C1733619026732.1733619064662 is not closed yet, will try archiving it next time 2024-12-08T00:51:04,725 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/WALs/0f983e3e5be1,45839,1733619026732/0f983e3e5be1%2C45839%2C1733619026732.1733619027242 to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/oldWALs/0f983e3e5be1%2C45839%2C1733619026732.1733619027242 2024-12-08T00:51:04,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741880_1056 (size=731) 2024-12-08T00:51:04,726 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-12-08T00:51:04,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741880_1056 (size=731) 2024-12-08T00:51:04,726 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/WALs/0f983e3e5be1,45839,1733619026732/0f983e3e5be1%2C45839%2C1733619026732.1733619064662 to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/oldWALs/0f983e3e5be1%2C45839%2C1733619026732.1733619064662 2024-12-08T00:51:04,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T00:51:04,826 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-08T00:51:04,826 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:51:04,826 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:51:04,826 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:51:04,826 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-08T00:51:04,827 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T00:51:04,827 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1293196183, stopped=false 2024-12-08T00:51:04,827 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0f983e3e5be1,40597,1733619026591 2024-12-08T00:51:04,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T00:51:04,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x10002f4c92d0001, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T00:51:04,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x10002f4c92d0001, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:51:04,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:51:04,880 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T00:51:04,881 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T00:51:04,881 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:51:04,881 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:51:04,881 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:51:04,882 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0f983e3e5be1,45839,1733619026732' ***** 2024-12-08T00:51:04,882 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45839-0x10002f4c92d0001, quorum=127.0.0.1:61574, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:51:04,882 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T00:51:04,882 INFO [RS:0;0f983e3e5be1:45839 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T00:51:04,882 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T00:51:04,883 INFO [RS:0;0f983e3e5be1:45839 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T00:51:04,883 INFO [RS:0;0f983e3e5be1:45839 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-08T00:51:04,883 INFO [RS:0;0f983e3e5be1:45839 {}] regionserver.HRegionServer(3091): Received CLOSE for b7786dedf2ce8631623c86f93f6e1d74 2024-12-08T00:51:04,883 INFO [RS:0;0f983e3e5be1:45839 {}] regionserver.HRegionServer(3091): Received CLOSE for c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:51:04,883 INFO [RS:0;0f983e3e5be1:45839 {}] regionserver.HRegionServer(959): stopping server 0f983e3e5be1,45839,1733619026732 2024-12-08T00:51:04,883 INFO [RS:0;0f983e3e5be1:45839 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T00:51:04,883 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b7786dedf2ce8631623c86f93f6e1d74, disabling compactions & flushes 2024-12-08T00:51:04,883 INFO [RS:0;0f983e3e5be1:45839 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0f983e3e5be1:45839. 2024-12-08T00:51:04,883 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733619042222.b7786dedf2ce8631623c86f93f6e1d74. 2024-12-08T00:51:04,884 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733619042222.b7786dedf2ce8631623c86f93f6e1d74. 2024-12-08T00:51:04,884 DEBUG [RS:0;0f983e3e5be1:45839 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:51:04,884 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733619042222.b7786dedf2ce8631623c86f93f6e1d74. after waiting 0 ms 2024-12-08T00:51:04,884 DEBUG [RS:0;0f983e3e5be1:45839 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:51:04,884 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733619042222.b7786dedf2ce8631623c86f93f6e1d74. 2024-12-08T00:51:04,884 INFO [RS:0;0f983e3e5be1:45839 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-08T00:51:04,884 INFO [RS:0;0f983e3e5be1:45839 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T00:51:04,884 INFO [RS:0;0f983e3e5be1:45839 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T00:51:04,884 INFO [RS:0;0f983e3e5be1:45839 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T00:51:04,884 INFO [RS:0;0f983e3e5be1:45839 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-12-08T00:51:04,884 DEBUG [RS:0;0f983e3e5be1:45839 {}] regionserver.HRegionServer(1325): Online Regions={b7786dedf2ce8631623c86f93f6e1d74=TestLogRolling-testLogRolling,,1733619042222.b7786dedf2ce8631623c86f93f6e1d74., c94c6b105330c7ab03f7d694b18f1b38=TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38., 1588230740=hbase:meta,,1.1588230740} 2024-12-08T00:51:04,884 DEBUG [RS:0;0f983e3e5be1:45839 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, b7786dedf2ce8631623c86f93f6e1d74, c94c6b105330c7ab03f7d694b18f1b38 2024-12-08T00:51:04,884 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T00:51:04,885 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T00:51:04,884 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733619042222.b7786dedf2ce8631623c86f93f6e1d74.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/b7786dedf2ce8631623c86f93f6e1d74/info/81017759f2744aff91e22bab9e3a13e2.bcea31935b7ba636d16fda5ff1400227->hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/81017759f2744aff91e22bab9e3a13e2-bottom] to archive 2024-12-08T00:51:04,885 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T00:51:04,885 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T00:51:04,885 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T00:51:04,886 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733619042222.b7786dedf2ce8631623c86f93f6e1d74.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-08T00:51:04,888 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733619042222.b7786dedf2ce8631623c86f93f6e1d74.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/b7786dedf2ce8631623c86f93f6e1d74/info/81017759f2744aff91e22bab9e3a13e2.bcea31935b7ba636d16fda5ff1400227 to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/b7786dedf2ce8631623c86f93f6e1d74/info/81017759f2744aff91e22bab9e3a13e2.bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:51:04,889 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733619042222.b7786dedf2ce8631623c86f93f6e1d74.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=0f983e3e5be1:40597 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-12-08T00:51:04,889 WARN [StoreCloser-TestLogRolling-testLogRolling,,1733619042222.b7786dedf2ce8631623c86f93f6e1d74.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-12-08T00:51:04,891 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-12-08T00:51:04,892 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T00:51:04,892 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T00:51:04,892 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733619064884Running coprocessor pre-close hooks at 1733619064884Disabling compacts and flushes for region at 1733619064884Disabling writes for close at 1733619064885 (+1 ms)Writing region close event to WAL at 1733619064886 (+1 ms)Running coprocessor post-close hooks at 1733619064892 (+6 ms)Closed at 1733619064892 2024-12-08T00:51:04,892 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T00:51:04,893 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/b7786dedf2ce8631623c86f93f6e1d74/recovered.edits/135.seqid, newMaxSeqId=135, maxSeqId=130 2024-12-08T00:51:04,894 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733619042222.b7786dedf2ce8631623c86f93f6e1d74. 
2024-12-08T00:51:04,894 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b7786dedf2ce8631623c86f93f6e1d74: Waiting for close lock at 1733619064883Running coprocessor pre-close hooks at 1733619064883Disabling compacts and flushes for region at 1733619064883Disabling writes for close at 1733619064884 (+1 ms)Writing region close event to WAL at 1733619064890 (+6 ms)Running coprocessor post-close hooks at 1733619064894 (+4 ms)Closed at 1733619064894 2024-12-08T00:51:04,894 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1733619042222.b7786dedf2ce8631623c86f93f6e1d74. 2024-12-08T00:51:04,894 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c94c6b105330c7ab03f7d694b18f1b38, disabling compactions & flushes 2024-12-08T00:51:04,894 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38. 2024-12-08T00:51:04,894 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38. 2024-12-08T00:51:04,895 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38. after waiting 0 ms 2024-12-08T00:51:04,895 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38. 
2024-12-08T00:51:04,895 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/81017759f2744aff91e22bab9e3a13e2.bcea31935b7ba636d16fda5ff1400227->hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/bcea31935b7ba636d16fda5ff1400227/info/81017759f2744aff91e22bab9e3a13e2-top, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/TestLogRolling-testLogRolling=bcea31935b7ba636d16fda5ff1400227-e890c85e66a84cc58840e923dc7ebb92, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/aceb76fb70444855882bf0c327b5d6cf, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/TestLogRolling-testLogRolling=bcea31935b7ba636d16fda5ff1400227-8e5aae031e91498bac239b58bdd7ade5, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/06aa4448d91a4a71a8ee09288162a954, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/e0987733de87462cb7f8459456c1e159, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/e9a2a292b11b47f2962e8b18f58f5228, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/9f88d098dcc14c2da4979192bc2f80fa, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/bacc7ac1c45745039f6c902e33d36519, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/003aa50b8f7447c686cdf80c3c6b47b9, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/7357b25d0c6e4edcadf4d8978443276b, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/57ea8e1f0b7d4a6188af1d56c2e84449, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/b9d824d97dbc4857ac235b7679b0b1ea, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/a490992dfd754b8f8a2a4b3146ac59c7, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/6cf6e54e9f0c4ac39beb035f7f199279, 
hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/09179db450f84b94a1ed91b5f98d0fe1, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/3af92e94a83240088860383010391fa7, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/385dc132ee304aeb9a209acc12c7437e, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/6ca78fbcdce544e199e255e5155fd481, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/24bb797d8266425db226a3b6eb8389fd, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/8534f182781a4f129df7cf677e75916a, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/533251601552462597d5edba43a1b33a, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/ceacf7e665bc462499552f3038537a13, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/7fb4425da6414d668c8137d2c3b0bc96] to archive 2024-12-08T00:51:04,896 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
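
The store-closer entries above and the "Archived from ... to ..." lines that follow show the archival layout: a compacted store file keeps its table/region/family/name suffix and only gains an archive/ segment under the root directory. An illustrative sketch of that path mapping, assuming the root dir taken from the logged paths and using plain string handling; this is not HBase's HFileArchiver, and the class name is made up for the example:

// Illustrative sketch only (not HBase's HFileArchiver): derives the archive
// location shown in the log for a compacted store file.
public class ArchivePathSketch {

    // HBase root dir exactly as it appears in the paths logged above.
    static final String ROOT =
            "hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92";

    // data/<ns>/<table>/<region>/<family>/<file> -> archive/data/<same suffix>
    static String toArchivePath(String storeFilePath) {
        String prefix = ROOT + "/data/";
        if (!storeFilePath.startsWith(prefix)) {
            throw new IllegalArgumentException("not under " + prefix + ": " + storeFilePath);
        }
        return ROOT + "/archive/data/" + storeFilePath.substring(prefix.length());
    }

    public static void main(String[] args) {
        String src = ROOT + "/data/default/TestLogRolling-testLogRolling/"
                + "c94c6b105330c7ab03f7d694b18f1b38/info/aceb76fb70444855882bf0c327b5d6cf";
        // Matches the "Archived from FileableStoreFile, <src> to <dst>" entries below.
        System.out.println(toArchivePath(src));
    }
}
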
2024-12-08T00:51:04,898 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/81017759f2744aff91e22bab9e3a13e2.bcea31935b7ba636d16fda5ff1400227 to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/81017759f2744aff91e22bab9e3a13e2.bcea31935b7ba636d16fda5ff1400227 2024-12-08T00:51:04,899 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/TestLogRolling-testLogRolling=bcea31935b7ba636d16fda5ff1400227-e890c85e66a84cc58840e923dc7ebb92 to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/TestLogRolling-testLogRolling=bcea31935b7ba636d16fda5ff1400227-e890c85e66a84cc58840e923dc7ebb92 2024-12-08T00:51:04,900 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/aceb76fb70444855882bf0c327b5d6cf to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/aceb76fb70444855882bf0c327b5d6cf 2024-12-08T00:51:04,901 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/TestLogRolling-testLogRolling=bcea31935b7ba636d16fda5ff1400227-8e5aae031e91498bac239b58bdd7ade5 to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/TestLogRolling-testLogRolling=bcea31935b7ba636d16fda5ff1400227-8e5aae031e91498bac239b58bdd7ade5 2024-12-08T00:51:04,903 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/06aa4448d91a4a71a8ee09288162a954 to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/06aa4448d91a4a71a8ee09288162a954 2024-12-08T00:51:04,904 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/e0987733de87462cb7f8459456c1e159 to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/e0987733de87462cb7f8459456c1e159 2024-12-08T00:51:04,905 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/e9a2a292b11b47f2962e8b18f58f5228 to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/e9a2a292b11b47f2962e8b18f58f5228 2024-12-08T00:51:04,906 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/9f88d098dcc14c2da4979192bc2f80fa to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/9f88d098dcc14c2da4979192bc2f80fa 2024-12-08T00:51:04,908 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/bacc7ac1c45745039f6c902e33d36519 to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/bacc7ac1c45745039f6c902e33d36519 2024-12-08T00:51:04,909 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/003aa50b8f7447c686cdf80c3c6b47b9 to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/003aa50b8f7447c686cdf80c3c6b47b9 2024-12-08T00:51:04,910 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/7357b25d0c6e4edcadf4d8978443276b to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/7357b25d0c6e4edcadf4d8978443276b 2024-12-08T00:51:04,911 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/57ea8e1f0b7d4a6188af1d56c2e84449 to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/57ea8e1f0b7d4a6188af1d56c2e84449 2024-12-08T00:51:04,913 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/b9d824d97dbc4857ac235b7679b0b1ea to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/b9d824d97dbc4857ac235b7679b0b1ea 2024-12-08T00:51:04,914 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/a490992dfd754b8f8a2a4b3146ac59c7 to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/a490992dfd754b8f8a2a4b3146ac59c7 2024-12-08T00:51:04,915 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/6cf6e54e9f0c4ac39beb035f7f199279 to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/6cf6e54e9f0c4ac39beb035f7f199279 2024-12-08T00:51:04,916 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/09179db450f84b94a1ed91b5f98d0fe1 to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/09179db450f84b94a1ed91b5f98d0fe1 2024-12-08T00:51:04,917 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/3af92e94a83240088860383010391fa7 to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/3af92e94a83240088860383010391fa7 2024-12-08T00:51:04,918 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/385dc132ee304aeb9a209acc12c7437e to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/385dc132ee304aeb9a209acc12c7437e 2024-12-08T00:51:04,919 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/6ca78fbcdce544e199e255e5155fd481 to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/6ca78fbcdce544e199e255e5155fd481 2024-12-08T00:51:04,920 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/24bb797d8266425db226a3b6eb8389fd to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/24bb797d8266425db226a3b6eb8389fd 2024-12-08T00:51:04,921 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/8534f182781a4f129df7cf677e75916a to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/8534f182781a4f129df7cf677e75916a 2024-12-08T00:51:04,922 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/533251601552462597d5edba43a1b33a to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/533251601552462597d5edba43a1b33a 2024-12-08T00:51:04,923 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/ceacf7e665bc462499552f3038537a13 to 
hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/ceacf7e665bc462499552f3038537a13 2024-12-08T00:51:04,924 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/7fb4425da6414d668c8137d2c3b0bc96 to hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/archive/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/info/7fb4425da6414d668c8137d2c3b0bc96 2024-12-08T00:51:04,924 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [aceb76fb70444855882bf0c327b5d6cf=42984, 06aa4448d91a4a71a8ee09288162a954=12516, e0987733de87462cb7f8459456c1e159=62558, e9a2a292b11b47f2962e8b18f58f5228=16828, 9f88d098dcc14c2da4979192bc2f80fa=17906, bacc7ac1c45745039f6c902e33d36519=83215, 003aa50b8f7447c686cdf80c3c6b47b9=12516, 7357b25d0c6e4edcadf4d8978443276b=17906, 57ea8e1f0b7d4a6188af1d56c2e84449=108119, b9d824d97dbc4857ac235b7679b0b1ea=16828, a490992dfd754b8f8a2a4b3146ac59c7=12516, 6cf6e54e9f0c4ac39beb035f7f199279=128835, 09179db450f84b94a1ed91b5f98d0fe1=17906, 3af92e94a83240088860383010391fa7=17906, 385dc132ee304aeb9a209acc12c7437e=149497, 6ca78fbcdce544e199e255e5155fd481=12521, 24bb797d8266425db226a3b6eb8389fd=20092, 8534f182781a4f129df7cf677e75916a=176582, 533251601552462597d5edba43a1b33a=16839, ceacf7e665bc462499552f3038537a13=12523, 7fb4425da6414d668c8137d2c3b0bc96=17918] 2024-12-08T00:51:04,927 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/data/default/TestLogRolling-testLogRolling/c94c6b105330c7ab03f7d694b18f1b38/recovered.edits/349.seqid, newMaxSeqId=349, maxSeqId=130 2024-12-08T00:51:04,927 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38. 2024-12-08T00:51:04,927 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c94c6b105330c7ab03f7d694b18f1b38: Waiting for close lock at 1733619064894Running coprocessor pre-close hooks at 1733619064894Disabling compacts and flushes for region at 1733619064894Disabling writes for close at 1733619064895 (+1 ms)Writing region close event to WAL at 1733619064924 (+29 ms)Running coprocessor post-close hooks at 1733619064927 (+3 ms)Closed at 1733619064927 2024-12-08T00:51:04,927 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1733619042222.c94c6b105330c7ab03f7d694b18f1b38. 2024-12-08T00:51:05,085 INFO [RS:0;0f983e3e5be1:45839 {}] regionserver.HRegionServer(976): stopping server 0f983e3e5be1,45839,1733619026732; all regions closed. 
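
The "Failed to report archival of files" warning above lists the archived store files as name=size pairs. A short, hypothetical log-scraping sketch that parses a few of those pairs (values copied from the warning; the remaining entries are omitted, not invented) and totals the bytes moved to the archive:

import java.util.LinkedHashMap;
import java.util.Map;

// Hypothetical log-scraping sketch: parses the "[file=size, ...]" list from
// the "Failed to report archival of files" warning above and totals the bytes.
public class ArchivedSizeSummary {
    public static void main(String[] args) {
        String list = "aceb76fb70444855882bf0c327b5d6cf=42984, 06aa4448d91a4a71a8ee09288162a954=12516, "
                + "e0987733de87462cb7f8459456c1e159=62558, e9a2a292b11b47f2962e8b18f58f5228=16828";
        // (remaining entries from the log entry above omitted here for brevity)

        Map<String, Long> sizes = new LinkedHashMap<>();
        for (String entry : list.split(",\\s*")) {
            String[] kv = entry.split("=", 2);
            sizes.put(kv[0], Long.parseLong(kv[1]));
        }
        long total = sizes.values().stream().mapToLong(Long::longValue).sum();
        System.out.printf("%d files, %.1f KB total%n", sizes.size(), total / 1024.0);
    }
}
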
2024-12-08T00:51:05,085 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:05,085 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:05,085 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:05,086 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:05,086 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:05,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741834_1010 (size=8107) 2024-12-08T00:51:05,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741834_1010 (size=8107) 2024-12-08T00:51:05,092 DEBUG [RS:0;0f983e3e5be1:45839 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/oldWALs 2024-12-08T00:51:05,092 INFO [RS:0;0f983e3e5be1:45839 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0f983e3e5be1%2C45839%2C1733619026732.meta:.meta(num 1733619027696) 2024-12-08T00:51:05,092 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:05,093 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:05,093 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:05,093 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:05,093 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:05,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741883_1059 (size=780) 2024-12-08T00:51:05,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741883_1059 (size=780) 2024-12-08T00:51:05,098 DEBUG [RS:0;0f983e3e5be1:45839 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/oldWALs 2024-12-08T00:51:05,098 INFO [RS:0;0f983e3e5be1:45839 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0f983e3e5be1%2C45839%2C1733619026732:(num 1733619064720) 2024-12-08T00:51:05,098 DEBUG [RS:0;0f983e3e5be1:45839 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:51:05,098 INFO [RS:0;0f983e3e5be1:45839 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:51:05,098 INFO [RS:0;0f983e3e5be1:45839 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T00:51:05,099 INFO [RS:0;0f983e3e5be1:45839 {}] hbase.ChoreService(370): Chore service for: regionserver/0f983e3e5be1:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T00:51:05,099 INFO [RS:0;0f983e3e5be1:45839 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T00:51:05,099 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T00:51:05,099 INFO [RS:0;0f983e3e5be1:45839 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45839 2024-12-08T00:51:05,112 INFO [regionserver/0f983e3e5be1:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:51:05,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x10002f4c92d0001, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0f983e3e5be1,45839,1733619026732 2024-12-08T00:51:05,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T00:51:05,131 INFO [RS:0;0f983e3e5be1:45839 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T00:51:05,139 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0f983e3e5be1,45839,1733619026732] 2024-12-08T00:51:05,147 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0f983e3e5be1,45839,1733619026732 already deleted, retry=false 2024-12-08T00:51:05,147 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0f983e3e5be1,45839,1733619026732 expired; onlineServers=0 2024-12-08T00:51:05,147 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0f983e3e5be1,40597,1733619026591' ***** 2024-12-08T00:51:05,147 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T00:51:05,148 INFO [M:0;0f983e3e5be1:40597 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T00:51:05,148 INFO [M:0;0f983e3e5be1:40597 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T00:51:05,149 DEBUG [M:0;0f983e3e5be1:40597 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T00:51:05,149 DEBUG [M:0;0f983e3e5be1:40597 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T00:51:05,149 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-08T00:51:05,149 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733619027056 {}] cleaner.HFileCleaner(306): Exit Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733619027056,5,FailOnTimeoutGroup] 2024-12-08T00:51:05,149 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733619027054 {}] cleaner.HFileCleaner(306): Exit Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733619027054,5,FailOnTimeoutGroup] 2024-12-08T00:51:05,150 INFO [M:0;0f983e3e5be1:40597 {}] hbase.ChoreService(370): Chore service for: master/0f983e3e5be1:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T00:51:05,150 INFO [M:0;0f983e3e5be1:40597 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T00:51:05,150 DEBUG [M:0;0f983e3e5be1:40597 {}] master.HMaster(1795): Stopping service threads 2024-12-08T00:51:05,150 INFO [M:0;0f983e3e5be1:40597 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T00:51:05,150 INFO [M:0;0f983e3e5be1:40597 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T00:51:05,151 INFO [M:0;0f983e3e5be1:40597 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T00:51:05,151 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T00:51:05,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T00:51:05,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:51:05,156 DEBUG [M:0;0f983e3e5be1:40597 {}] zookeeper.ZKUtil(347): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T00:51:05,156 WARN [M:0;0f983e3e5be1:40597 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T00:51:05,157 INFO [M:0;0f983e3e5be1:40597 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/.lastflushedseqids 2024-12-08T00:51:05,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741884_1060 (size=228) 2024-12-08T00:51:05,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741884_1060 (size=228) 2024-12-08T00:51:05,165 INFO [M:0;0f983e3e5be1:40597 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T00:51:05,165 INFO [M:0;0f983e3e5be1:40597 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T00:51:05,165 DEBUG [M:0;0f983e3e5be1:40597 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T00:51:05,166 INFO [M:0;0f983e3e5be1:40597 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:51:05,166 DEBUG [M:0;0f983e3e5be1:40597 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:51:05,166 DEBUG [M:0;0f983e3e5be1:40597 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T00:51:05,166 DEBUG [M:0;0f983e3e5be1:40597 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:51:05,166 INFO [M:0;0f983e3e5be1:40597 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.36 KB 2024-12-08T00:51:05,187 DEBUG [M:0;0f983e3e5be1:40597 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f6849a1ec9ce4dfcb3f7510fb5336408 is 82, key is hbase:meta,,1/info:regioninfo/1733619027721/Put/seqid=0 2024-12-08T00:51:05,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741885_1061 (size=5672) 2024-12-08T00:51:05,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741885_1061 (size=5672) 2024-12-08T00:51:05,192 INFO [M:0;0f983e3e5be1:40597 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f6849a1ec9ce4dfcb3f7510fb5336408 2024-12-08T00:51:05,207 DEBUG [M:0;0f983e3e5be1:40597 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/89aaf6868f7b424cb787a389fdc04fb8 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733619028246/Put/seqid=0 2024-12-08T00:51:05,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741886_1062 (size=7089) 2024-12-08T00:51:05,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741886_1062 (size=7089) 2024-12-08T00:51:05,212 INFO [M:0;0f983e3e5be1:40597 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/89aaf6868f7b424cb787a389fdc04fb8 2024-12-08T00:51:05,215 INFO [M:0;0f983e3e5be1:40597 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 89aaf6868f7b424cb787a389fdc04fb8 2024-12-08T00:51:05,227 DEBUG [M:0;0f983e3e5be1:40597 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5df616c23a8646499c6992796558df78 is 69, key is 0f983e3e5be1,45839,1733619026732/rs:state/1733619027091/Put/seqid=0 
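
Each "Len of the biggest cell" DEBUG message above prints the chosen cell's key in a <row>/<family>:<qualifier>/<timestamp>/<type>/seqid=<n> display form. A small parser sketch for that printed form only; the split-from-the-right approach and the class name are assumptions for illustration, and this says nothing about HBase's internal Cell layout:

// Illustrative parser for the printed key format in the flush messages above.
public class FlushKeyParser {

    static void describe(String key) {
        // Split from the right: the last four components are fixed
        // (family:qualifier, timestamp, type, seqid=<n>); everything before
        // them is the row, which may itself contain '/' characters.
        String[] parts = key.split("/");
        int n = parts.length;
        String row = String.join("/", java.util.Arrays.copyOfRange(parts, 0, n - 4));
        System.out.printf("row=%s  column=%s  ts=%s  type=%s  %s%n",
                row, parts[n - 4], parts[n - 3], parts[n - 2], parts[n - 1]);
    }

    public static void main(String[] args) {
        // Keys copied from the flush log entries above.
        describe("hbase:meta,,1/info:regioninfo/1733619027721/Put/seqid=0");
        describe("0f983e3e5be1,45839,1733619026732/rs:state/1733619027091/Put/seqid=0");
    }
}
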
2024-12-08T00:51:05,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741887_1063 (size=5156) 2024-12-08T00:51:05,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741887_1063 (size=5156) 2024-12-08T00:51:05,231 INFO [M:0;0f983e3e5be1:40597 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5df616c23a8646499c6992796558df78 2024-12-08T00:51:05,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x10002f4c92d0001, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:51:05,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45839-0x10002f4c92d0001, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:51:05,239 INFO [RS:0;0f983e3e5be1:45839 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T00:51:05,239 INFO [RS:0;0f983e3e5be1:45839 {}] regionserver.HRegionServer(1031): Exiting; stopping=0f983e3e5be1,45839,1733619026732; zookeeper connection closed. 2024-12-08T00:51:05,239 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@491a154e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@491a154e 2024-12-08T00:51:05,239 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-08T00:51:05,247 DEBUG [M:0;0f983e3e5be1:40597 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2901b4c8244549409b46278e4b6f0676 is 52, key is load_balancer_on/state:d/1733619027864/Put/seqid=0 2024-12-08T00:51:05,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741888_1064 (size=5056) 2024-12-08T00:51:05,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741888_1064 (size=5056) 2024-12-08T00:51:05,251 INFO [M:0;0f983e3e5be1:40597 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2901b4c8244549409b46278e4b6f0676 2024-12-08T00:51:05,256 DEBUG [M:0;0f983e3e5be1:40597 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f6849a1ec9ce4dfcb3f7510fb5336408 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f6849a1ec9ce4dfcb3f7510fb5336408 2024-12-08T00:51:05,260 INFO [M:0;0f983e3e5be1:40597 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f6849a1ec9ce4dfcb3f7510fb5336408, entries=8, sequenceid=125, filesize=5.5 K 2024-12-08T00:51:05,261 DEBUG [M:0;0f983e3e5be1:40597 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/89aaf6868f7b424cb787a389fdc04fb8 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/89aaf6868f7b424cb787a389fdc04fb8 2024-12-08T00:51:05,265 INFO [M:0;0f983e3e5be1:40597 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 89aaf6868f7b424cb787a389fdc04fb8 2024-12-08T00:51:05,265 INFO [M:0;0f983e3e5be1:40597 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/89aaf6868f7b424cb787a389fdc04fb8, entries=13, sequenceid=125, filesize=6.9 K 2024-12-08T00:51:05,266 DEBUG [M:0;0f983e3e5be1:40597 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5df616c23a8646499c6992796558df78 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5df616c23a8646499c6992796558df78 2024-12-08T00:51:05,271 INFO [M:0;0f983e3e5be1:40597 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5df616c23a8646499c6992796558df78, entries=1, sequenceid=125, filesize=5.0 K 2024-12-08T00:51:05,272 DEBUG [M:0;0f983e3e5be1:40597 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2901b4c8244549409b46278e4b6f0676 as hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2901b4c8244549409b46278e4b6f0676 2024-12-08T00:51:05,277 INFO [M:0;0f983e3e5be1:40597 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41911/user/jenkins/test-data/c659dd62-69ae-2b40-44ec-73d065f7fb92/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2901b4c8244549409b46278e4b6f0676, entries=1, sequenceid=125, filesize=4.9 K 2024-12-08T00:51:05,278 INFO [M:0;0f983e3e5be1:40597 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 112ms, sequenceid=125, compaction requested=false 2024-12-08T00:51:05,279 INFO [M:0;0f983e3e5be1:40597 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
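
The flush summary just above ("Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.30 KB/64816 ... in 112ms") reports each size twice: a rounded KiB figure and the exact byte count. A trivial, hypothetical sketch that reproduces that formatting from the raw byte counts in the log, which also confirms the rounded values are base-1024:

// Not HBase code: reproduces the "~<KiB> KB/<bytes>" pairs from the flush summary above.
public class FlushSummaryFormat {

    static String pretty(long bytes) {
        // The log shows the rounded KiB value followed by the exact byte count.
        return String.format("~%.2f KB/%d", bytes / 1024.0, bytes);
    }

    public static void main(String[] args) {
        long dataSize = 52651;   // bytes flushed from the 4 column families (from the log)
        long heapSize = 64816;   // on-heap size of the same edits (from the log)
        System.out.println("dataSize " + pretty(dataSize) + ", heapSize " + pretty(heapSize));
        // Prints: dataSize ~51.42 KB/52651, heapSize ~63.30 KB/64816
    }
}
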
2024-12-08T00:51:05,279 DEBUG [M:0;0f983e3e5be1:40597 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733619065165Disabling compacts and flushes for region at 1733619065165Disabling writes for close at 1733619065166 (+1 ms)Obtaining lock to block concurrent updates at 1733619065166Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733619065166Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64816, getOffHeapSize=0, getCellsCount=148 at 1733619065166Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733619065167 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733619065167Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733619065187 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733619065187Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733619065196 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733619065207 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733619065207Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733619065215 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733619065226 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733619065226Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733619065235 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733619065247 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733619065247Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@61f3738d: reopening flushed file at 1733619065255 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32f95755: reopening flushed file at 1733619065260 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3c11cc45: reopening flushed file at 1733619065265 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@775e2e83: reopening flushed file at 1733619065271 (+6 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 112ms, sequenceid=125, compaction requested=false at 1733619065278 (+7 ms)Writing region close event to WAL at 1733619065279 (+1 ms)Closed at 1733619065279 2024-12-08T00:51:05,280 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:05,280 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:05,280 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:05,280 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:05,280 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:05,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33409 is added to blk_1073741830_1006 (size=61320) 2024-12-08T00:51:05,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741830_1006 (size=61320) 2024-12-08T00:51:05,282 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T00:51:05,282 INFO [M:0;0f983e3e5be1:40597 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-08T00:51:05,282 INFO [M:0;0f983e3e5be1:40597 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40597 2024-12-08T00:51:05,282 INFO [M:0;0f983e3e5be1:40597 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T00:51:05,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:51:05,414 INFO [M:0;0f983e3e5be1:40597 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T00:51:05,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40597-0x10002f4c92d0000, quorum=127.0.0.1:61574, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:51:05,416 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d46424b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:51:05,417 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6ef289a6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:51:05,417 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:51:05,417 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49e6a30e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:51:05,417 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@58c8e0ef{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/hadoop.log.dir/,STOPPED} 2024-12-08T00:51:05,419 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T00:51:05,419 WARN [BP-1938729576-172.17.0.2-1733619025033 heartbeating to localhost/127.0.0.1:41911 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:51:05,419 WARN [BP-1938729576-172.17.0.2-1733619025033 heartbeating to localhost/127.0.0.1:41911 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1938729576-172.17.0.2-1733619025033 (Datanode Uuid 42e28748-3dcb-4583-b257-0a9e28be7ac4) service to localhost/127.0.0.1:41911 2024-12-08T00:51:05,419 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:51:05,420 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/cluster_0946dd5b-4961-eb56-9129-ebe145a0235c/data/data3/current/BP-1938729576-172.17.0.2-1733619025033 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:51:05,420 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/cluster_0946dd5b-4961-eb56-9129-ebe145a0235c/data/data4/current/BP-1938729576-172.17.0.2-1733619025033 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:51:05,420 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:51:05,422 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2c08daf8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:51:05,423 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1a78718c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:51:05,423 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:51:05,423 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18472eb7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:51:05,423 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18b8d361{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/hadoop.log.dir/,STOPPED} 2024-12-08T00:51:05,425 WARN [BP-1938729576-172.17.0.2-1733619025033 heartbeating to localhost/127.0.0.1:41911 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:51:05,425 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T00:51:05,425 WARN [BP-1938729576-172.17.0.2-1733619025033 heartbeating to localhost/127.0.0.1:41911 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1938729576-172.17.0.2-1733619025033 (Datanode Uuid 456eafa8-bfeb-40a9-918d-47a178e6e0a5) service to localhost/127.0.0.1:41911 2024-12-08T00:51:05,425 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:51:05,425 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/cluster_0946dd5b-4961-eb56-9129-ebe145a0235c/data/data1/current/BP-1938729576-172.17.0.2-1733619025033 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:51:05,425 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/cluster_0946dd5b-4961-eb56-9129-ebe145a0235c/data/data2/current/BP-1938729576-172.17.0.2-1733619025033 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:51:05,426 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:51:05,431 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4355bcf7{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T00:51:05,432 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@65f4ddad{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:51:05,432 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:51:05,432 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@831cfa{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:51:05,432 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@9538b4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/hadoop.log.dir/,STOPPED} 2024-12-08T00:51:05,438 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T00:51:05,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:51:05,463 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T00:51:05,470 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=232 (was 209) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41911 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41911 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:41911 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:41911 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:41911 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41911 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41911 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to 
localhost/127.0.0.1:41911 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=515 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=85 (was 42) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=17251 (was 17287) 2024-12-08T00:51:05,477 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=232, OpenFileDescriptor=515, MaxFileDescriptor=1048576, SystemLoadAverage=85, ProcessCount=11, AvailableMemoryMB=17251 2024-12-08T00:51:05,477 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T00:51:05,477 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/hadoop.log.dir so I do NOT create it in target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67 2024-12-08T00:51:05,477 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/33d99b1d-8c99-1a06-f136-b66e714a7006/hadoop.tmp.dir so I do NOT create it in target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67 2024-12-08T00:51:05,477 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/cluster_6c8cb891-6415-4cfe-b540-e3580e1e26a6, deleteOnExit=true 2024-12-08T00:51:05,477 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:51:05,477 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-08T00:51:05,477 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/test.cache.data in system properties and HBase conf 2024-12-08T00:51:05,477 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T00:51:05,477 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/hadoop.log.dir in system properties and HBase conf 2024-12-08T00:51:05,477 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T00:51:05,477 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T00:51:05,477 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T00:51:05,478 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-08T00:51:05,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T00:51:05,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T00:51:05,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T00:51:05,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T00:51:05,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T00:51:05,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T00:51:05,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T00:51:05,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T00:51:05,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T00:51:05,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/nfs.dump.dir in system properties and HBase conf 2024-12-08T00:51:05,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/java.io.tmpdir in system properties and HBase conf 2024-12-08T00:51:05,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T00:51:05,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T00:51:05,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T00:51:05,489 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T00:51:05,858 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:51:05,861 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:51:05,862 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:51:05,862 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:51:05,862 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T00:51:05,862 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:51:05,862 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13a857dd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:51:05,863 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2fe718c7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:51:05,911 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T00:51:05,911 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-08T00:51:05,912 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-08T00:51:05,912 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-08T00:51:05,951 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@39a32533{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/java.io.tmpdir/jetty-localhost-44477-hadoop-hdfs-3_4_1-tests_jar-_-any-4083676823194600627/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T00:51:05,951 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2dcca062{HTTP/1.1, (http/1.1)}{localhost:44477} 2024-12-08T00:51:05,951 INFO [Time-limited test {}] server.Server(415): Started @285381ms 2024-12-08T00:51:05,961 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T00:51:06,161 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:51:06,164 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:51:06,165 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:51:06,165 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:51:06,165 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T00:51:06,165 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@660a9944{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:51:06,166 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d5d7f03{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:51:06,253 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5c21148f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/java.io.tmpdir/jetty-localhost-35993-hadoop-hdfs-3_4_1-tests_jar-_-any-13931580890409897982/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:51:06,254 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20eba1fc{HTTP/1.1, (http/1.1)}{localhost:35993} 2024-12-08T00:51:06,254 INFO [Time-limited test {}] server.Server(415): Started @285683ms 2024-12-08T00:51:06,254 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T00:51:06,277 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:51:06,279 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:51:06,280 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:51:06,280 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:51:06,280 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T00:51:06,280 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f73177b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:51:06,280 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a6bd873{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:51:06,372 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@33430efa{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/java.io.tmpdir/jetty-localhost-43665-hadoop-hdfs-3_4_1-tests_jar-_-any-10625638972918810542/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:51:06,373 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2050b40d{HTTP/1.1, (http/1.1)}{localhost:43665} 2024-12-08T00:51:06,373 INFO [Time-limited test {}] server.Server(415): Started @285803ms 2024-12-08T00:51:06,374 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T00:51:06,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:51:06,477 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:51:06,944 WARN [Thread-2501 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/cluster_6c8cb891-6415-4cfe-b540-e3580e1e26a6/data/data1/current/BP-547448715-172.17.0.2-1733619065492/current, will proceed with Du for space computation calculation, 2024-12-08T00:51:06,944 WARN [Thread-2502 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/cluster_6c8cb891-6415-4cfe-b540-e3580e1e26a6/data/data2/current/BP-547448715-172.17.0.2-1733619065492/current, will proceed with Du for space computation calculation, 2024-12-08T00:51:06,959 WARN [Thread-2466 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T00:51:06,961 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x92500bafdc46d873 with lease ID 0x29fe2a5a67794ef9: Processing first storage report for DS-5648a5c7-637d-47e1-80cb-a318de25f803 from datanode DatanodeRegistration(127.0.0.1:41313, datanodeUuid=8b1ca667-e9ca-498a-a7b3-9c9f98763352, infoPort=46591, infoSecurePort=0, ipcPort=39261, storageInfo=lv=-57;cid=testClusterID;nsid=69003937;c=1733619065492) 2024-12-08T00:51:06,962 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x92500bafdc46d873 with lease ID 0x29fe2a5a67794ef9: from storage DS-5648a5c7-637d-47e1-80cb-a318de25f803 node DatanodeRegistration(127.0.0.1:41313, datanodeUuid=8b1ca667-e9ca-498a-a7b3-9c9f98763352, infoPort=46591, infoSecurePort=0, ipcPort=39261, storageInfo=lv=-57;cid=testClusterID;nsid=69003937;c=1733619065492), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:51:06,962 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x92500bafdc46d873 with lease ID 0x29fe2a5a67794ef9: Processing first storage report for DS-6a12ae34-f17f-45e4-b6d7-4e23503724c7 from datanode DatanodeRegistration(127.0.0.1:41313, datanodeUuid=8b1ca667-e9ca-498a-a7b3-9c9f98763352, infoPort=46591, infoSecurePort=0, ipcPort=39261, storageInfo=lv=-57;cid=testClusterID;nsid=69003937;c=1733619065492) 2024-12-08T00:51:06,962 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x92500bafdc46d873 with lease ID 0x29fe2a5a67794ef9: from storage DS-6a12ae34-f17f-45e4-b6d7-4e23503724c7 node DatanodeRegistration(127.0.0.1:41313, datanodeUuid=8b1ca667-e9ca-498a-a7b3-9c9f98763352, infoPort=46591, infoSecurePort=0, ipcPort=39261, storageInfo=lv=-57;cid=testClusterID;nsid=69003937;c=1733619065492), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T00:51:07,176 WARN [Thread-2513 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/cluster_6c8cb891-6415-4cfe-b540-e3580e1e26a6/data/data3/current/BP-547448715-172.17.0.2-1733619065492/current, will proceed with Du for space computation calculation, 2024-12-08T00:51:07,176 WARN [Thread-2514 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/cluster_6c8cb891-6415-4cfe-b540-e3580e1e26a6/data/data4/current/BP-547448715-172.17.0.2-1733619065492/current, will proceed with Du for space computation calculation, 2024-12-08T00:51:07,197 WARN [Thread-2489 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T00:51:07,198 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1542734a3495202e with lease ID 0x29fe2a5a67794efa: Processing first storage report for DS-c0aebd1a-6a70-4745-a6b6-3edb912f4d3c from datanode DatanodeRegistration(127.0.0.1:38907, datanodeUuid=25605410-68a3-4dda-82cd-6a33cd3f4711, infoPort=42831, infoSecurePort=0, ipcPort=39899, storageInfo=lv=-57;cid=testClusterID;nsid=69003937;c=1733619065492) 2024-12-08T00:51:07,199 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1542734a3495202e with lease ID 0x29fe2a5a67794efa: from storage DS-c0aebd1a-6a70-4745-a6b6-3edb912f4d3c node DatanodeRegistration(127.0.0.1:38907, datanodeUuid=25605410-68a3-4dda-82cd-6a33cd3f4711, infoPort=42831, infoSecurePort=0, ipcPort=39899, storageInfo=lv=-57;cid=testClusterID;nsid=69003937;c=1733619065492), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:51:07,199 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1542734a3495202e with lease ID 0x29fe2a5a67794efa: Processing first storage report for DS-8a43d45a-b45a-4e02-9329-d80d2054b213 from datanode DatanodeRegistration(127.0.0.1:38907, datanodeUuid=25605410-68a3-4dda-82cd-6a33cd3f4711, infoPort=42831, infoSecurePort=0, ipcPort=39899, storageInfo=lv=-57;cid=testClusterID;nsid=69003937;c=1733619065492) 2024-12-08T00:51:07,199 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1542734a3495202e with lease ID 0x29fe2a5a67794efa: from storage DS-8a43d45a-b45a-4e02-9329-d80d2054b213 node DatanodeRegistration(127.0.0.1:38907, datanodeUuid=25605410-68a3-4dda-82cd-6a33cd3f4711, infoPort=42831, infoSecurePort=0, ipcPort=39899, storageInfo=lv=-57;cid=testClusterID;nsid=69003937;c=1733619065492), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T00:51:07,299 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67 2024-12-08T00:51:07,335 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/cluster_6c8cb891-6415-4cfe-b540-e3580e1e26a6/zookeeper_0, clientPort=51293, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/cluster_6c8cb891-6415-4cfe-b540-e3580e1e26a6/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/cluster_6c8cb891-6415-4cfe-b540-e3580e1e26a6/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T00:51:07,336 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51293 2024-12-08T00:51:07,337 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:51:07,340 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:51:07,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741825_1001 (size=7) 2024-12-08T00:51:07,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38907 is added to blk_1073741825_1001 (size=7) 2024-12-08T00:51:07,348 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74 with version=8 2024-12-08T00:51:07,348 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44103/user/jenkins/test-data/4b636e01-451d-18f5-b93b-69b98a6af557/hbase-staging 2024-12-08T00:51:07,350 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0f983e3e5be1:0 server-side Connection retries=45 2024-12-08T00:51:07,350 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:51:07,350 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T00:51:07,350 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T00:51:07,350 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:51:07,350 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T00:51:07,350 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T00:51:07,350 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T00:51:07,351 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34507 2024-12-08T00:51:07,352 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34507 connecting to ZooKeeper ensemble=127.0.0.1:51293 2024-12-08T00:51:07,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:345070x0, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-12-08T00:51:07,398 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34507-0x10002f568610000 connected 2024-12-08T00:51:07,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:51:07,471 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:51:07,472 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:51:07,474 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:51:07,474 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74, hbase.cluster.distributed=false 2024-12-08T00:51:07,476 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T00:51:07,476 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34507 2024-12-08T00:51:07,477 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34507 2024-12-08T00:51:07,477 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34507 2024-12-08T00:51:07,477 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34507 2024-12-08T00:51:07,478 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34507 2024-12-08T00:51:07,478 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:51:07,493 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0f983e3e5be1:0 server-side Connection retries=45 2024-12-08T00:51:07,493 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:51:07,493 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T00:51:07,493 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T00:51:07,493 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:51:07,493 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T00:51:07,493 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T00:51:07,493 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T00:51:07,494 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33879 2024-12-08T00:51:07,495 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33879 connecting to ZooKeeper ensemble=127.0.0.1:51293 2024-12-08T00:51:07,495 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:51:07,496 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:51:07,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:338790x0, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T00:51:07,505 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33879-0x10002f568610001 connected 2024-12-08T00:51:07,505 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:33879-0x10002f568610001, quorum=127.0.0.1:51293, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:51:07,505 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T00:51:07,506 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T00:51:07,506 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33879-0x10002f568610001, quorum=127.0.0.1:51293, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T00:51:07,507 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33879-0x10002f568610001, quorum=127.0.0.1:51293, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T00:51:07,507 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33879 2024-12-08T00:51:07,507 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33879 2024-12-08T00:51:07,508 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33879 2024-12-08T00:51:07,508 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33879 2024-12-08T00:51:07,508 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33879 2024-12-08T00:51:07,518 DEBUG [M:0;0f983e3e5be1:34507 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0f983e3e5be1:34507 2024-12-08T00:51:07,518 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0f983e3e5be1,34507,1733619067350 2024-12-08T00:51:07,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33879-0x10002f568610001, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:51:07,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:51:07,530 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0f983e3e5be1,34507,1733619067350 2024-12-08T00:51:07,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:51:07,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33879-0x10002f568610001, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T00:51:07,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33879-0x10002f568610001, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-08T00:51:07,538 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T00:51:07,538 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0f983e3e5be1,34507,1733619067350 from backup master directory 2024-12-08T00:51:07,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0f983e3e5be1,34507,1733619067350 2024-12-08T00:51:07,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33879-0x10002f568610001, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:51:07,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:51:07,546 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T00:51:07,546 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0f983e3e5be1,34507,1733619067350 2024-12-08T00:51:07,550 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/hbase.id] with ID: ea36c606-7e44-4963-88fd-283f77fac58c 2024-12-08T00:51:07,550 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/.tmp/hbase.id 2024-12-08T00:51:07,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741826_1002 (size=42) 2024-12-08T00:51:07,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38907 is added to blk_1073741826_1002 (size=42) 2024-12-08T00:51:07,555 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/.tmp/hbase.id]:[hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/hbase.id] 2024-12-08T00:51:07,564 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:51:07,564 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-08T00:51:07,565 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-08T00:51:07,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:51:07,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33879-0x10002f568610001, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:51:07,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741827_1003 (size=196) 2024-12-08T00:51:07,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38907 is added to blk_1073741827_1003 (size=196) 2024-12-08T00:51:07,577 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T00:51:07,577 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T00:51:07,577 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:51:07,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741828_1004 (size=1189) 2024-12-08T00:51:07,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38907 is added to blk_1073741828_1004 (size=1189) 2024-12-08T00:51:07,584 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/data/master/store 2024-12-08T00:51:07,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741829_1005 (size=34) 2024-12-08T00:51:07,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38907 is added to blk_1073741829_1005 (size=34) 2024-12-08T00:51:07,589 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:51:07,589 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T00:51:07,589 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:51:07,589 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:51:07,589 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T00:51:07,589 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:51:07,589 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T00:51:07,589 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733619067589Disabling compacts and flushes for region at 1733619067589Disabling writes for close at 1733619067589Writing region close event to WAL at 1733619067589Closed at 1733619067589 2024-12-08T00:51:07,590 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/data/master/store/.initializing 2024-12-08T00:51:07,590 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/WALs/0f983e3e5be1,34507,1733619067350 2024-12-08T00:51:07,592 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C34507%2C1733619067350, suffix=, logDir=hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/WALs/0f983e3e5be1,34507,1733619067350, archiveDir=hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/oldWALs, maxLogs=10 2024-12-08T00:51:07,592 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C34507%2C1733619067350.1733619067592 2024-12-08T00:51:07,596 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/WALs/0f983e3e5be1,34507,1733619067350/0f983e3e5be1%2C34507%2C1733619067350.1733619067592 2024-12-08T00:51:07,597 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42831:42831),(127.0.0.1/127.0.0.1:46591:46591)] 2024-12-08T00:51:07,597 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:51:07,598 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:51:07,598 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:51:07,598 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:51:07,599 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:51:07,600 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T00:51:07,600 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:51:07,600 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:51:07,600 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:51:07,601 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T00:51:07,601 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:51:07,601 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:51:07,601 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:51:07,602 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T00:51:07,602 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:51:07,602 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:51:07,602 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:51:07,603 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T00:51:07,603 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:51:07,603 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:51:07,603 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:51:07,604 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:51:07,604 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:51:07,605 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:51:07,605 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:51:07,605 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T00:51:07,606 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:51:07,607 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:51:07,607 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=856183, jitterRate=0.0886935144662857}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T00:51:07,608 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733619067598Initializing all the Stores at 1733619067598Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733619067598Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733619067598Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733619067598Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733619067598Cleaning up temporary data from old regions at 1733619067605 (+7 ms)Region opened successfully at 1733619067608 (+3 ms) 2024-12-08T00:51:07,608 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T00:51:07,610 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@300667b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0f983e3e5be1/172.17.0.2:0 2024-12-08T00:51:07,611 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T00:51:07,611 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T00:51:07,611 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T00:51:07,611 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T00:51:07,612 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-08T00:51:07,612 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-08T00:51:07,612 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T00:51:07,614 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T00:51:07,614 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T00:51:07,621 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T00:51:07,621 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T00:51:07,622 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T00:51:07,629 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T00:51:07,630 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T00:51:07,630 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T00:51:07,638 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T00:51:07,639 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T00:51:07,646 DEBUG 
[master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T00:51:07,649 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T00:51:07,655 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T00:51:07,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T00:51:07,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33879-0x10002f568610001, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T00:51:07,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33879-0x10002f568610001, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:51:07,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:51:07,664 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0f983e3e5be1,34507,1733619067350, sessionid=0x10002f568610000, setting cluster-up flag (Was=false) 2024-12-08T00:51:07,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:51:07,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33879-0x10002f568610001, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:51:07,705 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T00:51:07,707 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0f983e3e5be1,34507,1733619067350 2024-12-08T00:51:07,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:51:07,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33879-0x10002f568610001, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:51:07,746 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T00:51:07,749 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0f983e3e5be1,34507,1733619067350 2024-12-08T00:51:07,752 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T00:51:07,755 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T00:51:07,755 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T00:51:07,755 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-08T00:51:07,755 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0f983e3e5be1,34507,1733619067350 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T00:51:07,757 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:51:07,757 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:51:07,757 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:51:07,757 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:51:07,757 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0f983e3e5be1:0, corePoolSize=10, maxPoolSize=10 2024-12-08T00:51:07,758 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:51:07,758 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=2, maxPoolSize=2 2024-12-08T00:51:07,758 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=1, 
maxPoolSize=1 2024-12-08T00:51:07,759 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733619097759 2024-12-08T00:51:07,759 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T00:51:07,759 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:51:07,759 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T00:51:07,759 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T00:51:07,759 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T00:51:07,759 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T00:51:07,759 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-08T00:51:07,759 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T00:51:07,760 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-08T00:51:07,760 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T00:51:07,760 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T00:51:07,760 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T00:51:07,760 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T00:51:07,760 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T00:51:07,761 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:51:07,761 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T00:51:07,761 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733619067761,5,FailOnTimeoutGroup] 2024-12-08T00:51:07,761 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733619067761,5,FailOnTimeoutGroup] 2024-12-08T00:51:07,761 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T00:51:07,761 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T00:51:07,761 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T00:51:07,761 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-08T00:51:07,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741831_1007 (size=1321) 2024-12-08T00:51:07,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38907 is added to blk_1073741831_1007 (size=1321) 2024-12-08T00:51:07,768 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T00:51:07,769 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74 2024-12-08T00:51:07,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38907 is added to blk_1073741832_1008 (size=32) 2024-12-08T00:51:07,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741832_1008 (size=32) 2024-12-08T00:51:07,775 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:51:07,776 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T00:51:07,777 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T00:51:07,777 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:51:07,777 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:51:07,777 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T00:51:07,778 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T00:51:07,778 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:51:07,778 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:51:07,778 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T00:51:07,779 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T00:51:07,779 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:51:07,780 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:51:07,780 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T00:51:07,781 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T00:51:07,781 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:51:07,781 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:51:07,781 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T00:51:07,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/data/hbase/meta/1588230740 2024-12-08T00:51:07,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/data/hbase/meta/1588230740 2024-12-08T00:51:07,783 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T00:51:07,783 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T00:51:07,783 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T00:51:07,784 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T00:51:07,786 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:51:07,786 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=766870, jitterRate=-0.02487453818321228}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T00:51:07,786 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733619067775Initializing all the Stores at 1733619067775Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733619067775Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733619067775Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733619067775Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733619067775Cleaning up temporary data from old regions at 1733619067783 (+8 ms)Region opened successfully at 1733619067786 (+3 ms) 2024-12-08T00:51:07,786 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T00:51:07,786 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T00:51:07,786 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T00:51:07,787 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T00:51:07,787 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T00:51:07,787 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T00:51:07,787 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733619067786Disabling compacts and flushes for region at 
1733619067786Disabling writes for close at 1733619067787 (+1 ms)Writing region close event to WAL at 1733619067787Closed at 1733619067787 2024-12-08T00:51:07,788 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:51:07,788 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T00:51:07,788 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T00:51:07,789 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T00:51:07,789 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T00:51:07,810 INFO [RS:0;0f983e3e5be1:33879 {}] regionserver.HRegionServer(746): ClusterId : ea36c606-7e44-4963-88fd-283f77fac58c 2024-12-08T00:51:07,810 DEBUG [RS:0;0f983e3e5be1:33879 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T00:51:07,823 DEBUG [RS:0;0f983e3e5be1:33879 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T00:51:07,823 DEBUG [RS:0;0f983e3e5be1:33879 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T00:51:07,831 DEBUG [RS:0;0f983e3e5be1:33879 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T00:51:07,832 DEBUG [RS:0;0f983e3e5be1:33879 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@208600aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0f983e3e5be1/172.17.0.2:0 2024-12-08T00:51:07,847 DEBUG [RS:0;0f983e3e5be1:33879 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0f983e3e5be1:33879 2024-12-08T00:51:07,847 INFO [RS:0;0f983e3e5be1:33879 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T00:51:07,847 INFO [RS:0;0f983e3e5be1:33879 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T00:51:07,847 DEBUG [RS:0;0f983e3e5be1:33879 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-08T00:51:07,848 INFO [RS:0;0f983e3e5be1:33879 {}] regionserver.HRegionServer(2659): reportForDuty to master=0f983e3e5be1,34507,1733619067350 with port=33879, startcode=1733619067493 2024-12-08T00:51:07,848 DEBUG [RS:0;0f983e3e5be1:33879 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T00:51:07,849 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52815, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T00:51:07,850 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34507 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0f983e3e5be1,33879,1733619067493 2024-12-08T00:51:07,850 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34507 {}] master.ServerManager(517): Registering regionserver=0f983e3e5be1,33879,1733619067493 2024-12-08T00:51:07,851 DEBUG [RS:0;0f983e3e5be1:33879 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74 2024-12-08T00:51:07,851 DEBUG [RS:0;0f983e3e5be1:33879 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38827 2024-12-08T00:51:07,851 DEBUG [RS:0;0f983e3e5be1:33879 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T00:51:07,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T00:51:07,855 DEBUG [RS:0;0f983e3e5be1:33879 {}] zookeeper.ZKUtil(111): regionserver:33879-0x10002f568610001, quorum=127.0.0.1:51293, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0f983e3e5be1,33879,1733619067493 2024-12-08T00:51:07,855 WARN [RS:0;0f983e3e5be1:33879 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T00:51:07,855 INFO [RS:0;0f983e3e5be1:33879 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:51:07,855 DEBUG [RS:0;0f983e3e5be1:33879 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/WALs/0f983e3e5be1,33879,1733619067493 2024-12-08T00:51:07,855 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0f983e3e5be1,33879,1733619067493] 2024-12-08T00:51:07,858 INFO [RS:0;0f983e3e5be1:33879 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T00:51:07,860 INFO [RS:0;0f983e3e5be1:33879 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T00:51:07,860 INFO [RS:0;0f983e3e5be1:33879 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T00:51:07,860 INFO [RS:0;0f983e3e5be1:33879 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-08T00:51:07,860 INFO [RS:0;0f983e3e5be1:33879 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T00:51:07,861 INFO [RS:0;0f983e3e5be1:33879 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T00:51:07,861 INFO [RS:0;0f983e3e5be1:33879 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T00:51:07,861 DEBUG [RS:0;0f983e3e5be1:33879 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:51:07,861 DEBUG [RS:0;0f983e3e5be1:33879 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:51:07,861 DEBUG [RS:0;0f983e3e5be1:33879 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:51:07,861 DEBUG [RS:0;0f983e3e5be1:33879 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:51:07,861 DEBUG [RS:0;0f983e3e5be1:33879 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:51:07,861 DEBUG [RS:0;0f983e3e5be1:33879 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0f983e3e5be1:0, corePoolSize=2, maxPoolSize=2 2024-12-08T00:51:07,861 DEBUG [RS:0;0f983e3e5be1:33879 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:51:07,861 DEBUG [RS:0;0f983e3e5be1:33879 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:51:07,861 DEBUG [RS:0;0f983e3e5be1:33879 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:51:07,861 DEBUG [RS:0;0f983e3e5be1:33879 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:51:07,861 DEBUG [RS:0;0f983e3e5be1:33879 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:51:07,861 DEBUG [RS:0;0f983e3e5be1:33879 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:51:07,861 DEBUG [RS:0;0f983e3e5be1:33879 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:51:07,861 DEBUG [RS:0;0f983e3e5be1:33879 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:51:07,862 INFO [RS:0;0f983e3e5be1:33879 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-08T00:51:07,862 INFO [RS:0;0f983e3e5be1:33879 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T00:51:07,862 INFO [RS:0;0f983e3e5be1:33879 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:51:07,862 INFO [RS:0;0f983e3e5be1:33879 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T00:51:07,862 INFO [RS:0;0f983e3e5be1:33879 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T00:51:07,862 INFO [RS:0;0f983e3e5be1:33879 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,33879,1733619067493-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T00:51:07,875 INFO [RS:0;0f983e3e5be1:33879 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T00:51:07,875 INFO [RS:0;0f983e3e5be1:33879 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,33879,1733619067493-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:51:07,875 INFO [RS:0;0f983e3e5be1:33879 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:51:07,875 INFO [RS:0;0f983e3e5be1:33879 {}] regionserver.Replication(171): 0f983e3e5be1,33879,1733619067493 started 2024-12-08T00:51:07,885 INFO [RS:0;0f983e3e5be1:33879 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:51:07,885 INFO [RS:0;0f983e3e5be1:33879 {}] regionserver.HRegionServer(1482): Serving as 0f983e3e5be1,33879,1733619067493, RpcServer on 0f983e3e5be1/172.17.0.2:33879, sessionid=0x10002f568610001 2024-12-08T00:51:07,885 DEBUG [RS:0;0f983e3e5be1:33879 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T00:51:07,885 DEBUG [RS:0;0f983e3e5be1:33879 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0f983e3e5be1,33879,1733619067493 2024-12-08T00:51:07,885 DEBUG [RS:0;0f983e3e5be1:33879 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,33879,1733619067493' 2024-12-08T00:51:07,885 DEBUG [RS:0;0f983e3e5be1:33879 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T00:51:07,886 DEBUG [RS:0;0f983e3e5be1:33879 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T00:51:07,886 DEBUG [RS:0;0f983e3e5be1:33879 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T00:51:07,886 DEBUG [RS:0;0f983e3e5be1:33879 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T00:51:07,886 DEBUG [RS:0;0f983e3e5be1:33879 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0f983e3e5be1,33879,1733619067493 2024-12-08T00:51:07,886 DEBUG [RS:0;0f983e3e5be1:33879 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,33879,1733619067493' 2024-12-08T00:51:07,886 DEBUG [RS:0;0f983e3e5be1:33879 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T00:51:07,887 DEBUG 
[RS:0;0f983e3e5be1:33879 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T00:51:07,887 DEBUG [RS:0;0f983e3e5be1:33879 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T00:51:07,887 INFO [RS:0;0f983e3e5be1:33879 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T00:51:07,887 INFO [RS:0;0f983e3e5be1:33879 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T00:51:07,940 WARN [0f983e3e5be1:34507 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-08T00:51:07,990 INFO [RS:0;0f983e3e5be1:33879 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C33879%2C1733619067493, suffix=, logDir=hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/WALs/0f983e3e5be1,33879,1733619067493, archiveDir=hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/oldWALs, maxLogs=32 2024-12-08T00:51:07,992 INFO [RS:0;0f983e3e5be1:33879 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C33879%2C1733619067493.1733619067991 2024-12-08T00:51:08,000 INFO [RS:0;0f983e3e5be1:33879 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/WALs/0f983e3e5be1,33879,1733619067493/0f983e3e5be1%2C33879%2C1733619067493.1733619067991 2024-12-08T00:51:08,002 DEBUG [RS:0;0f983e3e5be1:33879 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46591:46591),(127.0.0.1/127.0.0.1:42831:42831)] 2024-12-08T00:51:08,190 DEBUG [0f983e3e5be1:34507 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-08T00:51:08,191 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0f983e3e5be1,33879,1733619067493 2024-12-08T00:51:08,193 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0f983e3e5be1,33879,1733619067493, state=OPENING 2024-12-08T00:51:08,247 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T00:51:08,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:51:08,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33879-0x10002f568610001, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:51:08,255 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:51:08,255 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:51:08,255 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T00:51:08,256 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0f983e3e5be1,33879,1733619067493}] 2024-12-08T00:51:08,409 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T00:51:08,414 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35419, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T00:51:08,420 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-08T00:51:08,420 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:51:08,422 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C33879%2C1733619067493.meta, suffix=.meta, logDir=hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/WALs/0f983e3e5be1,33879,1733619067493, archiveDir=hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/oldWALs, maxLogs=32 2024-12-08T00:51:08,423 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C33879%2C1733619067493.meta.1733619068422.meta 2024-12-08T00:51:08,429 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/WALs/0f983e3e5be1,33879,1733619067493/0f983e3e5be1%2C33879%2C1733619067493.meta.1733619068422.meta 2024-12-08T00:51:08,429 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46591:46591),(127.0.0.1/127.0.0.1:42831:42831)] 2024-12-08T00:51:08,430 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:51:08,430 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T00:51:08,430 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T00:51:08,430 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-08T00:51:08,430 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T00:51:08,430 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:51:08,431 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-08T00:51:08,431 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-08T00:51:08,432 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T00:51:08,432 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T00:51:08,432 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:51:08,432 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:51:08,433 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T00:51:08,433 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T00:51:08,433 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:51:08,433 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:51:08,433 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T00:51:08,434 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T00:51:08,434 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:51:08,434 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:51:08,434 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T00:51:08,435 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T00:51:08,435 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:51:08,435 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-08T00:51:08,435 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T00:51:08,436 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/data/hbase/meta/1588230740 2024-12-08T00:51:08,436 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/data/hbase/meta/1588230740 2024-12-08T00:51:08,437 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T00:51:08,437 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T00:51:08,438 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T00:51:08,438 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T00:51:08,439 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=749445, jitterRate=-0.04703260958194733}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T00:51:08,439 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-08T00:51:08,439 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733619068431Writing region info on filesystem at 1733619068431Initializing all the Stores at 1733619068431Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733619068431Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733619068431Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733619068431Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733619068431Cleaning up temporary data from old regions at 1733619068437 (+6 ms)Running coprocessor post-open hooks at 1733619068439 (+2 ms)Region opened successfully at 1733619068439 2024-12-08T00:51:08,440 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733619068408 2024-12-08T00:51:08,442 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T00:51:08,442 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-08T00:51:08,442 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0f983e3e5be1,33879,1733619067493 2024-12-08T00:51:08,443 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0f983e3e5be1,33879,1733619067493, state=OPEN 2024-12-08T00:51:08,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,46325,1733618889648/0f983e3e5be1%2C46325%2C1733618889648.1733618889880 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T00:51:08,478 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34853/user/jenkins/test-data/f27564f9-b596-f54a-b82a-e5bcc0a69956/WALs/0f983e3e5be1,33993,1733618888281/0f983e3e5be1%2C33993%2C1733618888281.meta.1733618889450.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T00:51:08,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T00:51:08,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33879-0x10002f568610001, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T00:51:08,480 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0f983e3e5be1,33879,1733619067493 2024-12-08T00:51:08,480 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:51:08,480 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:51:08,483 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T00:51:08,483 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0f983e3e5be1,33879,1733619067493 in 225 msec 2024-12-08T00:51:08,485 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T00:51:08,486 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 695 msec 2024-12-08T00:51:08,486 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:51:08,486 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-08T00:51:08,488 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T00:51:08,488 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0f983e3e5be1,33879,1733619067493, seqNum=-1] 2024-12-08T00:51:08,488 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:51:08,490 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41191, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:51:08,496 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 741 msec 2024-12-08T00:51:08,496 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733619068496, completionTime=-1 2024-12-08T00:51:08,496 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-08T00:51:08,496 DEBUG 
[master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-08T00:51:08,499 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-08T00:51:08,499 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733619128499 2024-12-08T00:51:08,499 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733619188499 2024-12-08T00:51:08,499 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-08T00:51:08,499 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,34507,1733619067350-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:51:08,499 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,34507,1733619067350-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:51:08,499 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,34507,1733619067350-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:51:08,499 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0f983e3e5be1:34507, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:51:08,500 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T00:51:08,500 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-08T00:51:08,502 DEBUG [master/0f983e3e5be1:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T00:51:08,505 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.959sec 2024-12-08T00:51:08,505 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T00:51:08,505 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T00:51:08,505 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T00:51:08,505 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-08T00:51:08,505 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T00:51:08,505 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,34507,1733619067350-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T00:51:08,505 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,34507,1733619067350-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T00:51:08,508 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T00:51:08,508 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T00:51:08,508 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,34507,1733619067350-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:51:08,510 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c1990e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:51:08,510 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0f983e3e5be1,34507,-1 for getting cluster id 2024-12-08T00:51:08,510 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T00:51:08,511 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ea36c606-7e44-4963-88fd-283f77fac58c' 2024-12-08T00:51:08,511 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T00:51:08,512 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ea36c606-7e44-4963-88fd-283f77fac58c" 2024-12-08T00:51:08,512 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f0d5326, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:51:08,512 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0f983e3e5be1,34507,-1] 2024-12-08T00:51:08,512 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T00:51:08,512 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:51:08,513 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47492, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T00:51:08,513 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45717e1d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:51:08,514 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T00:51:08,515 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0f983e3e5be1,33879,1733619067493, seqNum=-1] 2024-12-08T00:51:08,515 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:51:08,516 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46530, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:51:08,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0f983e3e5be1,34507,1733619067350 2024-12-08T00:51:08,517 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:51:08,519 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-08T00:51:08,519 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:51:08,521 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/WALs/test.com,8080,1, archiveDir=hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/oldWALs, maxLogs=32 2024-12-08T00:51:08,521 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733619068521 2024-12-08T00:51:08,525 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/WALs/test.com,8080,1/test.com%2C8080%2C1.1733619068521 2024-12-08T00:51:08,528 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46591:46591),(127.0.0.1/127.0.0.1:42831:42831)] 2024-12-08T00:51:08,531 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733619068531 2024-12-08T00:51:08,536 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:08,536 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:08,536 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:08,536 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:08,536 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:08,537 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/WALs/test.com,8080,1/test.com%2C8080%2C1.1733619068521 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/WALs/test.com,8080,1/test.com%2C8080%2C1.1733619068531 2024-12-08T00:51:08,537 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46591:46591),(127.0.0.1/127.0.0.1:42831:42831)] 2024-12-08T00:51:08,537 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/WALs/test.com,8080,1/test.com%2C8080%2C1.1733619068521 is not closed yet, will try archiving it next time 2024-12-08T00:51:08,538 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:08,538 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:08,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38907 is added to blk_1073741835_1011 (size=93) 2024-12-08T00:51:08,538 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:08,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741835_1011 (size=93) 2024-12-08T00:51:08,539 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/WALs/test.com,8080,1/test.com%2C8080%2C1.1733619068521 to hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/oldWALs/test.com%2C8080%2C1.1733619068521 2024-12-08T00:51:08,539 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:08,540 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:08,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38907 is added to blk_1073741836_1012 (size=93) 2024-12-08T00:51:08,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741836_1012 (size=93) 2024-12-08T00:51:08,543 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/oldWALs 2024-12-08T00:51:08,543 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1733619068531) 2024-12-08T00:51:08,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T00:51:08,544 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-08T00:51:08,544 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:51:08,544 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:51:08,544 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:51:08,544 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-08T00:51:08,544 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T00:51:08,544 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=175539105, stopped=false 2024-12-08T00:51:08,544 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0f983e3e5be1,34507,1733619067350 2024-12-08T00:51:08,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33879-0x10002f568610001, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T00:51:08,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T00:51:08,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33879-0x10002f568610001, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:51:08,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:51:08,571 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T00:51:08,572 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T00:51:08,572 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:51:08,572 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:51:08,572 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:51:08,572 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0f983e3e5be1,33879,1733619067493' ***** 2024-12-08T00:51:08,572 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T00:51:08,572 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33879-0x10002f568610001, quorum=127.0.0.1:51293, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:51:08,573 INFO [RS:0;0f983e3e5be1:33879 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T00:51:08,573 INFO [RS:0;0f983e3e5be1:33879 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T00:51:08,573 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T00:51:08,573 INFO [RS:0;0f983e3e5be1:33879 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T00:51:08,573 INFO [RS:0;0f983e3e5be1:33879 {}] regionserver.HRegionServer(959): stopping server 0f983e3e5be1,33879,1733619067493 2024-12-08T00:51:08,573 INFO [RS:0;0f983e3e5be1:33879 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T00:51:08,573 INFO [RS:0;0f983e3e5be1:33879 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0f983e3e5be1:33879. 
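[editor note] The ZKWatcher/ZKUtil entries above show watches being re-set on /hbase/running after the znode is deleted ("Set watcher on znode that does not yet exist"). A hedged sketch of that same pattern with the plain ZooKeeper client API (not HBase's ZKUtil) is below; the connect string is illustrative, and the quorum in this run was 127.0.0.1:51293.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class RunningNodeWatchExample {
  public static void main(String[] args) throws Exception {
    // Connect string and timeout are illustrative, not taken from the test configuration.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });

    Watcher watcher = (WatchedEvent event) ->
        System.out.println("event " + event.getType() + " on " + event.getPath());

    // exists() registers the watch even when the znode is absent; it returns null in that case,
    // and a later NodeCreated/NodeDeleted event will fire on the registered watcher.
    Stat stat = zk.exists("/hbase/running", watcher);
    System.out.println("/hbase/running " + (stat == null ? "absent, watch set" : "present"));
    zk.close();
  }
}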
2024-12-08T00:51:08,573 DEBUG [RS:0;0f983e3e5be1:33879 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:51:08,573 DEBUG [RS:0;0f983e3e5be1:33879 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:51:08,574 INFO [RS:0;0f983e3e5be1:33879 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T00:51:08,574 INFO [RS:0;0f983e3e5be1:33879 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T00:51:08,574 INFO [RS:0;0f983e3e5be1:33879 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-08T00:51:08,574 INFO [RS:0;0f983e3e5be1:33879 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T00:51:08,574 INFO [RS:0;0f983e3e5be1:33879 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-08T00:51:08,574 DEBUG [RS:0;0f983e3e5be1:33879 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-08T00:51:08,574 DEBUG [RS:0;0f983e3e5be1:33879 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-08T00:51:08,574 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T00:51:08,574 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T00:51:08,574 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T00:51:08,575 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T00:51:08,575 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T00:51:08,575 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-08T00:51:08,592 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/data/hbase/meta/1588230740/.tmp/ns/76ae6134541d453a82a22eead7cae826 is 43, key is default/ns:d/1733619068490/Put/seqid=0 2024-12-08T00:51:08,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741837_1013 (size=5153) 2024-12-08T00:51:08,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38907 is added to blk_1073741837_1013 (size=5153) 2024-12-08T00:51:08,596 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/data/hbase/meta/1588230740/.tmp/ns/76ae6134541d453a82a22eead7cae826 2024-12-08T00:51:08,601 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/data/hbase/meta/1588230740/.tmp/ns/76ae6134541d453a82a22eead7cae826 as hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/data/hbase/meta/1588230740/ns/76ae6134541d453a82a22eead7cae826 2024-12-08T00:51:08,606 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/data/hbase/meta/1588230740/ns/76ae6134541d453a82a22eead7cae826, entries=2, sequenceid=6, filesize=5.0 K 2024-12-08T00:51:08,607 INFO 
[RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 32ms, sequenceid=6, compaction requested=false 2024-12-08T00:51:08,611 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-08T00:51:08,611 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T00:51:08,611 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T00:51:08,611 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733619068574Running coprocessor pre-close hooks at 1733619068574Disabling compacts and flushes for region at 1733619068574Disabling writes for close at 1733619068575 (+1 ms)Obtaining lock to block concurrent updates at 1733619068575Preparing flush snapshotting stores in 1588230740 at 1733619068575Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733619068575Flushing stores of hbase:meta,,1.1588230740 at 1733619068576 (+1 ms)Flushing 1588230740/ns: creating writer at 1733619068577 (+1 ms)Flushing 1588230740/ns: appending metadata at 1733619068592 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1733619068592Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1668ffd6: reopening flushed file at 1733619068600 (+8 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 32ms, sequenceid=6, compaction requested=false at 1733619068607 (+7 ms)Writing region close event to WAL at 1733619068608 (+1 ms)Running coprocessor post-close hooks at 1733619068611 (+3 ms)Closed at 1733619068611 2024-12-08T00:51:08,611 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T00:51:08,775 INFO [RS:0;0f983e3e5be1:33879 {}] regionserver.HRegionServer(976): stopping server 0f983e3e5be1,33879,1733619067493; all regions closed. 
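[editor note] The flush entries above follow the usual write-to-temporary-then-commit pattern: DefaultStoreFlusher writes the new HFile under .../.tmp/ns/ and HRegionFileSystem then commits it into the store directory, so readers only ever see complete files. A rough sketch of that pattern with the public Hadoop FileSystem API is below; the paths and payload are illustrative, not the HBase internals.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommitExample {
  public static void main(String[] args) throws Exception {
    // Uses whatever fs.defaultFS is configured (the local filesystem by default).
    FileSystem fs = FileSystem.get(new Configuration());
    Path tmp = new Path("/tmp/store-example/.tmp/ns/example-file");
    Path dst = new Path("/tmp/store-example/ns/example-file");

    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeBytes("flushed cells would go here"); // placeholder payload
    }
    // The commit step is a rename into the final store directory.
    fs.mkdirs(dst.getParent());
    boolean committed = fs.rename(tmp, dst);
    System.out.println("committed=" + committed);
  }
}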
2024-12-08T00:51:08,775 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:08,776 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:08,776 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:08,776 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:08,776 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:08,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38907 is added to blk_1073741834_1010 (size=1152) 2024-12-08T00:51:08,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741834_1010 (size=1152) 2024-12-08T00:51:08,784 DEBUG [RS:0;0f983e3e5be1:33879 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/oldWALs 2024-12-08T00:51:08,784 INFO [RS:0;0f983e3e5be1:33879 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0f983e3e5be1%2C33879%2C1733619067493.meta:.meta(num 1733619068422) 2024-12-08T00:51:08,785 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:08,785 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:08,785 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:08,785 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:08,786 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:08,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38907 is added to blk_1073741833_1009 (size=93) 2024-12-08T00:51:08,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741833_1009 (size=93) 2024-12-08T00:51:08,792 DEBUG [RS:0;0f983e3e5be1:33879 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/oldWALs 2024-12-08T00:51:08,792 INFO [RS:0;0f983e3e5be1:33879 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0f983e3e5be1%2C33879%2C1733619067493:(num 1733619067991) 2024-12-08T00:51:08,792 DEBUG [RS:0;0f983e3e5be1:33879 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:51:08,792 INFO [RS:0;0f983e3e5be1:33879 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:51:08,792 INFO [RS:0;0f983e3e5be1:33879 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T00:51:08,792 INFO [RS:0;0f983e3e5be1:33879 {}] hbase.ChoreService(370): Chore service for: regionserver/0f983e3e5be1:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T00:51:08,792 INFO [RS:0;0f983e3e5be1:33879 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T00:51:08,792 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T00:51:08,792 INFO [RS:0;0f983e3e5be1:33879 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33879 2024-12-08T00:51:08,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33879-0x10002f568610001, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0f983e3e5be1,33879,1733619067493 2024-12-08T00:51:08,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T00:51:08,821 INFO [RS:0;0f983e3e5be1:33879 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T00:51:08,829 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0f983e3e5be1,33879,1733619067493] 2024-12-08T00:51:08,837 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0f983e3e5be1,33879,1733619067493 already deleted, retry=false 2024-12-08T00:51:08,838 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0f983e3e5be1,33879,1733619067493 expired; onlineServers=0 2024-12-08T00:51:08,838 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0f983e3e5be1,34507,1733619067350' ***** 2024-12-08T00:51:08,838 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T00:51:08,838 INFO [M:0;0f983e3e5be1:34507 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T00:51:08,838 INFO [M:0;0f983e3e5be1:34507 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T00:51:08,838 DEBUG [M:0;0f983e3e5be1:34507 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T00:51:08,838 DEBUG [M:0;0f983e3e5be1:34507 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T00:51:08,838 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-08T00:51:08,838 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733619067761 {}] cleaner.HFileCleaner(306): Exit Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733619067761,5,FailOnTimeoutGroup] 2024-12-08T00:51:08,838 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733619067761 {}] cleaner.HFileCleaner(306): Exit Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733619067761,5,FailOnTimeoutGroup] 2024-12-08T00:51:08,838 INFO [M:0;0f983e3e5be1:34507 {}] hbase.ChoreService(370): Chore service for: master/0f983e3e5be1:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T00:51:08,838 INFO [M:0;0f983e3e5be1:34507 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T00:51:08,838 DEBUG [M:0;0f983e3e5be1:34507 {}] master.HMaster(1795): Stopping service threads 2024-12-08T00:51:08,838 INFO [M:0;0f983e3e5be1:34507 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T00:51:08,838 INFO [M:0;0f983e3e5be1:34507 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T00:51:08,838 INFO [M:0;0f983e3e5be1:34507 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T00:51:08,839 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T00:51:08,846 DEBUG [M:0;0f983e3e5be1:34507 {}] zookeeper.ZKUtil(347): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T00:51:08,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T00:51:08,846 WARN [M:0;0f983e3e5be1:34507 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T00:51:08,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:51:08,847 INFO [M:0;0f983e3e5be1:34507 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/.lastflushedseqids 2024-12-08T00:51:08,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741838_1014 (size=99) 2024-12-08T00:51:08,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38907 is added to blk_1073741838_1014 (size=99) 2024-12-08T00:51:08,852 INFO [M:0;0f983e3e5be1:34507 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T00:51:08,853 INFO [M:0;0f983e3e5be1:34507 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T00:51:08,853 DEBUG [M:0;0f983e3e5be1:34507 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T00:51:08,853 INFO [M:0;0f983e3e5be1:34507 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:51:08,853 DEBUG [M:0;0f983e3e5be1:34507 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:51:08,853 DEBUG [M:0;0f983e3e5be1:34507 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T00:51:08,853 DEBUG [M:0;0f983e3e5be1:34507 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:51:08,853 INFO [M:0;0f983e3e5be1:34507 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-08T00:51:08,869 DEBUG [M:0;0f983e3e5be1:34507 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/32cd8b986b1d4d16a0b00696e7f31335 is 82, key is hbase:meta,,1/info:regioninfo/1733619068442/Put/seqid=0 2024-12-08T00:51:08,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38907 is added to blk_1073741839_1015 (size=5672) 2024-12-08T00:51:08,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741839_1015 (size=5672) 2024-12-08T00:51:08,874 INFO [M:0;0f983e3e5be1:34507 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/32cd8b986b1d4d16a0b00696e7f31335 2024-12-08T00:51:08,891 DEBUG [M:0;0f983e3e5be1:34507 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/47eff465cb7847d7ad709fd817832556 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733619068495/Put/seqid=0 2024-12-08T00:51:08,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38907 is added to blk_1073741840_1016 (size=5275) 2024-12-08T00:51:08,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741840_1016 (size=5275) 2024-12-08T00:51:08,895 INFO [M:0;0f983e3e5be1:34507 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/47eff465cb7847d7ad709fd817832556 2024-12-08T00:51:08,911 DEBUG [M:0;0f983e3e5be1:34507 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7c151c18edfb42a48ef5b965f1a6bcca is 69, key is 0f983e3e5be1,33879,1733619067493/rs:state/1733619067850/Put/seqid=0 2024-12-08T00:51:08,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741841_1017 (size=5156) 2024-12-08T00:51:08,915 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38907 is added to blk_1073741841_1017 (size=5156) 2024-12-08T00:51:08,915 INFO [M:0;0f983e3e5be1:34507 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7c151c18edfb42a48ef5b965f1a6bcca 2024-12-08T00:51:08,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33879-0x10002f568610001, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:51:08,930 INFO [RS:0;0f983e3e5be1:33879 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T00:51:08,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33879-0x10002f568610001, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:51:08,930 INFO [RS:0;0f983e3e5be1:33879 {}] regionserver.HRegionServer(1031): Exiting; stopping=0f983e3e5be1,33879,1733619067493; zookeeper connection closed. 2024-12-08T00:51:08,930 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@16382d17 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@16382d17 2024-12-08T00:51:08,930 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-08T00:51:08,931 DEBUG [M:0;0f983e3e5be1:34507 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/192c5640bc7e40d2b403b6ff5c656c47 is 52, key is load_balancer_on/state:d/1733619068518/Put/seqid=0 2024-12-08T00:51:08,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741842_1018 (size=5056) 2024-12-08T00:51:08,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38907 is added to blk_1073741842_1018 (size=5056) 2024-12-08T00:51:08,935 INFO [M:0;0f983e3e5be1:34507 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/192c5640bc7e40d2b403b6ff5c656c47 2024-12-08T00:51:08,939 DEBUG [M:0;0f983e3e5be1:34507 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/32cd8b986b1d4d16a0b00696e7f31335 as hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/32cd8b986b1d4d16a0b00696e7f31335 2024-12-08T00:51:08,943 INFO [M:0;0f983e3e5be1:34507 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/32cd8b986b1d4d16a0b00696e7f31335, entries=8, sequenceid=29, filesize=5.5 K 2024-12-08T00:51:08,944 DEBUG [M:0;0f983e3e5be1:34507 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/47eff465cb7847d7ad709fd817832556 as hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/47eff465cb7847d7ad709fd817832556 2024-12-08T00:51:08,948 INFO [M:0;0f983e3e5be1:34507 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/47eff465cb7847d7ad709fd817832556, entries=3, sequenceid=29, filesize=5.2 K 2024-12-08T00:51:08,949 DEBUG [M:0;0f983e3e5be1:34507 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7c151c18edfb42a48ef5b965f1a6bcca as hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7c151c18edfb42a48ef5b965f1a6bcca 2024-12-08T00:51:08,952 INFO [M:0;0f983e3e5be1:34507 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7c151c18edfb42a48ef5b965f1a6bcca, entries=1, sequenceid=29, filesize=5.0 K 2024-12-08T00:51:08,953 DEBUG [M:0;0f983e3e5be1:34507 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/192c5640bc7e40d2b403b6ff5c656c47 as hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/192c5640bc7e40d2b403b6ff5c656c47 2024-12-08T00:51:08,956 INFO [M:0;0f983e3e5be1:34507 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38827/user/jenkins/test-data/0b7a4c17-8641-4b85-398d-31d8b7f41b74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/192c5640bc7e40d2b403b6ff5c656c47, entries=1, sequenceid=29, filesize=4.9 K 2024-12-08T00:51:08,957 INFO [M:0;0f983e3e5be1:34507 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 104ms, sequenceid=29, compaction requested=false 2024-12-08T00:51:08,958 INFO [M:0;0f983e3e5be1:34507 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:51:08,958 DEBUG [M:0;0f983e3e5be1:34507 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733619068853Disabling compacts and flushes for region at 1733619068853Disabling writes for close at 1733619068853Obtaining lock to block concurrent updates at 1733619068853Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733619068853Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733619068854 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733619068854Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733619068854Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733619068869 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733619068869Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733619068878 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733619068891 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733619068891Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733619068899 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733619068911 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733619068911Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733619068919 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733619068930 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733619068930Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32f69f55: reopening flushed file at 1733619068938 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7de48e21: reopening flushed file at 1733619068943 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@fdc594d: reopening flushed file at 1733619068948 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a99735a: reopening flushed file at 1733619068952 (+4 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 104ms, sequenceid=29, compaction requested=false at 1733619068957 (+5 ms)Writing region close event to WAL at 1733619068958 (+1 ms)Closed at 1733619068958 2024-12-08T00:51:08,959 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:08,959 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:08,959 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:08,959 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:08,959 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:51:08,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38907 is added to blk_1073741830_1006 (size=10311) 2024-12-08T00:51:08,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741830_1006 (size=10311) 2024-12-08T00:51:08,961 INFO [M:0;0f983e3e5be1:34507 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-08T00:51:08,961 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T00:51:08,961 INFO [M:0;0f983e3e5be1:34507 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34507 2024-12-08T00:51:08,961 INFO [M:0;0f983e3e5be1:34507 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T00:51:09,088 INFO [M:0;0f983e3e5be1:34507 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T00:51:09,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:51:09,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34507-0x10002f568610000, quorum=127.0.0.1:51293, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:51:09,093 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@33430efa{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:51:09,094 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2050b40d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:51:09,094 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:51:09,094 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a6bd873{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:51:09,095 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f73177b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/hadoop.log.dir/,STOPPED} 2024-12-08T00:51:09,096 WARN [BP-547448715-172.17.0.2-1733619065492 heartbeating to localhost/127.0.0.1:38827 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:51:09,096 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T00:51:09,097 WARN [BP-547448715-172.17.0.2-1733619065492 heartbeating to localhost/127.0.0.1:38827 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-547448715-172.17.0.2-1733619065492 (Datanode Uuid 25605410-68a3-4dda-82cd-6a33cd3f4711) service to localhost/127.0.0.1:38827 2024-12-08T00:51:09,097 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:51:09,097 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/cluster_6c8cb891-6415-4cfe-b540-e3580e1e26a6/data/data3/current/BP-547448715-172.17.0.2-1733619065492 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:51:09,097 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/cluster_6c8cb891-6415-4cfe-b540-e3580e1e26a6/data/data4/current/BP-547448715-172.17.0.2-1733619065492 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:51:09,098 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:51:09,100 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5c21148f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:51:09,100 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20eba1fc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:51:09,100 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:51:09,100 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d5d7f03{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:51:09,100 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@660a9944{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/hadoop.log.dir/,STOPPED} 2024-12-08T00:51:09,101 WARN [BP-547448715-172.17.0.2-1733619065492 heartbeating to localhost/127.0.0.1:38827 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:51:09,101 WARN [BP-547448715-172.17.0.2-1733619065492 heartbeating to localhost/127.0.0.1:38827 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-547448715-172.17.0.2-1733619065492 (Datanode Uuid 8b1ca667-e9ca-498a-a7b3-9c9f98763352) service to localhost/127.0.0.1:38827 2024-12-08T00:51:09,101 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T00:51:09,102 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:51:09,102 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/cluster_6c8cb891-6415-4cfe-b540-e3580e1e26a6/data/data1/current/BP-547448715-172.17.0.2-1733619065492 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:51:09,102 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/cluster_6c8cb891-6415-4cfe-b540-e3580e1e26a6/data/data2/current/BP-547448715-172.17.0.2-1733619065492 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:51:09,102 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:51:09,108 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@39a32533{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T00:51:09,108 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2dcca062{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:51:09,108 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:51:09,109 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2fe718c7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:51:09,109 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13a857dd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b00dbc42-c2ae-8329-7b75-f6549bfdae67/hadoop.log.dir/,STOPPED} 2024-12-08T00:51:09,114 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T00:51:09,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T00:51:09,135 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=271 (was 232) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:38827 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:38827 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38827 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38827 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38827 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:38827 from jenkins.hfs.7 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38827 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:38827 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=532 (was 515) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=85 (was 85), ProcessCount=11 (was 11), AvailableMemoryMB=17245 (was 17251)
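[editor note] The ResourceChecker summary above compares thread and file-descriptor counts before and after the test and lists potentially hanging threads with their stacks. A small, generic sketch of collecting similar information with the standard JDK API (not HBase's ResourceChecker) is shown below.

import java.util.Map;

public class ThreadDumpExample {
  public static void main(String[] args) {
    // Snapshot of all live threads and their stack traces at this moment.
    Map<Thread, StackTraceElement[]> traces = Thread.getAllStackTraces();
    System.out.println("live threads: " + traces.size());
    traces.forEach((thread, frames) -> {
      String top = frames.length > 0 ? frames[0].toString() : "<no frames>";
      System.out.println(thread.getName() + " [" + thread.getState() + "] at " + top);
    });
  }
}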