2024-11-14 06:44:09,375 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-14 06:44:09,394 main DEBUG Took 0.016304 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-14 06:44:09,395 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-14 06:44:09,396 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-14 06:44:09,397 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-14 06:44:09,400 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 06:44:09,410 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-14 06:44:09,432 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 06:44:09,434 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 06:44:09,435 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 06:44:09,435 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 06:44:09,436 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 06:44:09,437 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 06:44:09,439 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 06:44:09,439 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 06:44:09,440 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 06:44:09,441 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 06:44:09,442 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 06:44:09,443 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 06:44:09,444 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 06:44:09,445 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-14 06:44:09,446 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 06:44:09,446 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 06:44:09,447 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 06:44:09,448 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 06:44:09,449 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 06:44:09,449 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 06:44:09,450 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 06:44:09,451 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 06:44:09,452 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 06:44:09,452 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 06:44:09,453 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 06:44:09,453 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-14 06:44:09,455 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 06:44:09,457 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-14 06:44:09,460 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-14 06:44:09,461 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-14 06:44:09,463 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-14 06:44:09,463 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-14 06:44:09,475 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-14 06:44:09,478 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-14 06:44:09,481 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-14 06:44:09,482 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-14 06:44:09,482 main DEBUG createAppenders(={Console}) 2024-11-14 06:44:09,483 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-14 06:44:09,484 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-14 06:44:09,484 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-14 06:44:09,485 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-14 06:44:09,486 main DEBUG OutputStream closed 2024-11-14 06:44:09,486 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-14 06:44:09,486 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-14 06:44:09,487 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-14 06:44:09,589 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-14 06:44:09,592 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-14 06:44:09,593 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-14 06:44:09,594 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-14 06:44:09,595 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-14 06:44:09,596 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-14 06:44:09,596 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-14 06:44:09,596 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-14 06:44:09,597 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-14 06:44:09,597 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-14 06:44:09,598 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-14 06:44:09,598 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-14 06:44:09,599 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-14 06:44:09,599 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-14 06:44:09,599 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-14 06:44:09,600 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-14 06:44:09,600 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-14 06:44:09,601 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-14 06:44:09,603 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-14 06:44:09,604 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-14 06:44:09,604 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-14 06:44:09,605 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-14T06:44:09,959 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f 2024-11-14 06:44:09,963 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-14 06:44:09,964 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
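(Editor's note: the configuration assembled in the lines above is loaded from the log4j2.properties inside the hbase-logging tests jar. Purely as an illustration, the following is a minimal Java sketch, using Log4j2's ConfigurationBuilder API with a stock ConsoleAppender standing in for HBase's HBaseTestAppender, that builds roughly the same pattern layout, root level, and a few of the per-package levels shown above. The pattern string and logger names/levels come from the log; the rest is assumed and is not the project's actual setup code.)

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.appender.ConsoleAppender;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;

public class TestLoggingSketch {
  public static void main(String[] args) {
    ConfigurationBuilder<BuiltConfiguration> builder =
        ConfigurationBuilderFactory.newConfigurationBuilder();

    // Pattern and target copied from the PatternLayout$Builder / HBaseTestAppender$Builder lines above.
    builder.add(builder.newAppender("Console", "Console")
        .addAttribute("target", ConsoleAppender.Target.SYSTEM_ERR)
        .add(builder.newLayout("PatternLayout")
            .addAttribute("pattern", "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n")));

    // A few of the per-package levels from the createLoggers(...) list above.
    builder.add(builder.newLogger("org.apache.hadoop", Level.WARN));
    builder.add(builder.newLogger("org.apache.hadoop.hbase", Level.DEBUG));
    builder.add(builder.newLogger("org.apache.zookeeper", Level.ERROR));

    // Root logger: INFO routed to the Console appender (levelAndRefs="INFO,Console" above).
    builder.add(builder.newRootLogger(Level.INFO)
        .add(builder.newAppenderRef("Console")));

    Configurator.initialize(builder.build());
  }
}
```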
2024-11-14T06:44:09,973 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-14T06:44:10,008 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=265, ProcessCount=11, AvailableMemoryMB=2363 2024-11-14T06:44:10,011 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T06:44:10,026 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/cluster_2e073713-b5cb-4197-b6cd-5ecb23063947, deleteOnExit=true 2024-11-14T06:44:10,027 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T06:44:10,028 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/test.cache.data in system properties and HBase conf 2024-11-14T06:44:10,028 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T06:44:10,029 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/hadoop.log.dir in system properties and HBase conf 2024-11-14T06:44:10,030 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T06:44:10,030 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T06:44:10,031 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T06:44:10,123 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-14T06:44:10,233 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-14T06:44:10,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T06:44:10,239 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T06:44:10,239 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T06:44:10,240 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T06:44:10,240 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T06:44:10,241 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T06:44:10,241 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T06:44:10,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T06:44:10,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T06:44:10,243 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/nfs.dump.dir in system properties and HBase conf 2024-11-14T06:44:10,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/java.io.tmpdir in system properties and HBase conf 2024-11-14T06:44:10,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T06:44:10,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T06:44:10,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T06:44:10,799 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T06:44:11,173 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-14T06:44:11,256 INFO [Time-limited test {}] log.Log(170): Logging initialized @2851ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-14T06:44:11,343 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:44:11,412 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:44:11,436 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:44:11,437 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:44:11,439 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T06:44:11,452 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:44:11,455 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:44:11,457 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:44:11,673 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/java.io.tmpdir/jetty-localhost-38389-hadoop-hdfs-3_4_1-tests_jar-_-any-9464995227619881929/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T06:44:11,680 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:38389} 2024-11-14T06:44:11,680 INFO [Time-limited test {}] server.Server(415): Started @3277ms 2024-11-14T06:44:11,710 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T06:44:12,061 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:44:12,070 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:44:12,071 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:44:12,071 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:44:12,071 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T06:44:12,072 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:44:12,073 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:44:12,190 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/java.io.tmpdir/jetty-localhost-34325-hadoop-hdfs-3_4_1-tests_jar-_-any-14096715515446217412/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:44:12,191 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:34325} 2024-11-14T06:44:12,192 INFO [Time-limited test {}] server.Server(415): Started @3788ms 2024-11-14T06:44:12,248 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T06:44:12,416 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:44:12,424 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:44:12,429 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:44:12,429 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:44:12,429 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T06:44:12,446 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:44:12,447 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:44:12,599 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/java.io.tmpdir/jetty-localhost-33861-hadoop-hdfs-3_4_1-tests_jar-_-any-7652035451242315475/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:44:12,600 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:33861} 2024-11-14T06:44:12,600 INFO [Time-limited test {}] server.Server(415): Started @4197ms 2024-11-14T06:44:12,605 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
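(Editor's note: the StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1} logged earlier, followed by the DFS and Jetty startup just above, is the HBaseTestingUtil mini-cluster bootstrap. Below is a minimal, hypothetical sketch of that call sequence. HBaseTestingUtil and StartMiniClusterOption are named in the log itself; the builder method names and the shutdown call are assumptions, and this is not the actual TestLogRolling setup code.)

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();

    // Mirrors the option string logged above: 1 master, 1 region server, 2 datanodes, 1 ZK server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();

    // Brings up mini DFS, mini ZooKeeper, then the HBase master and region server,
    // producing startup output much like the log in this section.
    util.startMiniCluster(option);
    try {
      // ... test body would go here ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
```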
2024-11-14T06:44:12,845 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/cluster_2e073713-b5cb-4197-b6cd-5ecb23063947/data/data1/current/BP-828308674-172.17.0.2-1731566650918/current, will proceed with Du for space computation calculation, 2024-11-14T06:44:12,848 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/cluster_2e073713-b5cb-4197-b6cd-5ecb23063947/data/data2/current/BP-828308674-172.17.0.2-1731566650918/current, will proceed with Du for space computation calculation, 2024-11-14T06:44:12,848 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/cluster_2e073713-b5cb-4197-b6cd-5ecb23063947/data/data4/current/BP-828308674-172.17.0.2-1731566650918/current, will proceed with Du for space computation calculation, 2024-11-14T06:44:12,849 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/cluster_2e073713-b5cb-4197-b6cd-5ecb23063947/data/data3/current/BP-828308674-172.17.0.2-1731566650918/current, will proceed with Du for space computation calculation, 2024-11-14T06:44:12,934 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T06:44:12,940 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T06:44:13,021 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4d070fe00b5c82a4 with lease ID 0xa3058b7512eb91c1: Processing first storage report for DS-f95ded65-4dd4-4418-9437-20f9aec080da from datanode DatanodeRegistration(127.0.0.1:43335, datanodeUuid=e128d9dc-a6c3-4cbc-8aa5-267311cfa7e1, infoPort=38547, infoSecurePort=0, ipcPort=35801, storageInfo=lv=-57;cid=testClusterID;nsid=1796989054;c=1731566650918) 2024-11-14T06:44:13,024 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4d070fe00b5c82a4 with lease ID 0xa3058b7512eb91c1: from storage DS-f95ded65-4dd4-4418-9437-20f9aec080da node DatanodeRegistration(127.0.0.1:43335, datanodeUuid=e128d9dc-a6c3-4cbc-8aa5-267311cfa7e1, infoPort=38547, infoSecurePort=0, ipcPort=35801, storageInfo=lv=-57;cid=testClusterID;nsid=1796989054;c=1731566650918), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-14T06:44:13,024 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaa0462264cba8105 with lease ID 0xa3058b7512eb91c0: Processing first storage report for DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65 from datanode DatanodeRegistration(127.0.0.1:38983, datanodeUuid=dc5a9485-1db3-430d-bb85-5d3a83e8c4fe, infoPort=41679, infoSecurePort=0, ipcPort=40335, storageInfo=lv=-57;cid=testClusterID;nsid=1796989054;c=1731566650918) 2024-11-14T06:44:13,024 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaa0462264cba8105 with lease ID 0xa3058b7512eb91c0: from storage DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65 node DatanodeRegistration(127.0.0.1:38983, datanodeUuid=dc5a9485-1db3-430d-bb85-5d3a83e8c4fe, infoPort=41679, infoSecurePort=0, ipcPort=40335, storageInfo=lv=-57;cid=testClusterID;nsid=1796989054;c=1731566650918), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:44:13,024 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4d070fe00b5c82a4 with lease ID 0xa3058b7512eb91c1: Processing first storage report for DS-20eab218-bcdb-44ee-ae19-634c7197358e from datanode DatanodeRegistration(127.0.0.1:43335, datanodeUuid=e128d9dc-a6c3-4cbc-8aa5-267311cfa7e1, infoPort=38547, infoSecurePort=0, ipcPort=35801, storageInfo=lv=-57;cid=testClusterID;nsid=1796989054;c=1731566650918) 2024-11-14T06:44:13,025 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4d070fe00b5c82a4 with lease ID 0xa3058b7512eb91c1: from storage DS-20eab218-bcdb-44ee-ae19-634c7197358e node DatanodeRegistration(127.0.0.1:43335, datanodeUuid=e128d9dc-a6c3-4cbc-8aa5-267311cfa7e1, infoPort=38547, infoSecurePort=0, ipcPort=35801, storageInfo=lv=-57;cid=testClusterID;nsid=1796989054;c=1731566650918), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:44:13,025 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaa0462264cba8105 with lease ID 0xa3058b7512eb91c0: Processing first storage report for DS-36f3c9e8-a4ff-4d2d-b1af-f476ebe2bbac from datanode DatanodeRegistration(127.0.0.1:38983, datanodeUuid=dc5a9485-1db3-430d-bb85-5d3a83e8c4fe, infoPort=41679, infoSecurePort=0, ipcPort=40335, storageInfo=lv=-57;cid=testClusterID;nsid=1796989054;c=1731566650918) 2024-11-14T06:44:13,025 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xaa0462264cba8105 with lease ID 0xa3058b7512eb91c0: from storage DS-36f3c9e8-a4ff-4d2d-b1af-f476ebe2bbac node DatanodeRegistration(127.0.0.1:38983, datanodeUuid=dc5a9485-1db3-430d-bb85-5d3a83e8c4fe, infoPort=41679, infoSecurePort=0, ipcPort=40335, storageInfo=lv=-57;cid=testClusterID;nsid=1796989054;c=1731566650918), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:44:13,121 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f 2024-11-14T06:44:13,241 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/cluster_2e073713-b5cb-4197-b6cd-5ecb23063947/zookeeper_0, clientPort=61515, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/cluster_2e073713-b5cb-4197-b6cd-5ecb23063947/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/cluster_2e073713-b5cb-4197-b6cd-5ecb23063947/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T06:44:13,260 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61515 2024-11-14T06:44:13,278 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:44:13,282 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:44:13,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741825_1001 (size=7) 2024-11-14T06:44:13,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741825_1001 (size=7) 2024-11-14T06:44:14,004 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6 with version=8 2024-11-14T06:44:14,004 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/hbase-staging 2024-11-14T06:44:14,090 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-14T06:44:14,313 INFO [Time-limited test {}] client.ConnectionUtils(128): master/20680646cf8a:0 server-side Connection retries=45 2024-11-14T06:44:14,323 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:44:14,323 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T06:44:14,329 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T06:44:14,329 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:44:14,329 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T06:44:14,470 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T06:44:14,532 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-14T06:44:14,540 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-14T06:44:14,543 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T06:44:14,567 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 46116 (auto-detected) 2024-11-14T06:44:14,568 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-14T06:44:14,587 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41891 2024-11-14T06:44:14,606 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41891 connecting to ZooKeeper ensemble=127.0.0.1:61515 2024-11-14T06:44:14,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:418910x0, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T06:44:14,635 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41891-0x1003cf9ab910000 connected 2024-11-14T06:44:14,664 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:44:14,667 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:44:14,675 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:44:14,679 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6, hbase.cluster.distributed=false 2024-11-14T06:44:14,711 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T06:44:14,721 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41891 
2024-11-14T06:44:14,722 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41891 2024-11-14T06:44:14,724 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41891 2024-11-14T06:44:14,733 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41891 2024-11-14T06:44:14,733 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41891 2024-11-14T06:44:14,853 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/20680646cf8a:0 server-side Connection retries=45 2024-11-14T06:44:14,855 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:44:14,855 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T06:44:14,856 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T06:44:14,856 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:44:14,856 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T06:44:14,860 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T06:44:14,864 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T06:44:14,865 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36049 2024-11-14T06:44:14,868 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36049 connecting to ZooKeeper ensemble=127.0.0.1:61515 2024-11-14T06:44:14,869 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:44:14,876 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:44:14,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:360490x0, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T06:44:14,888 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:360490x0, quorum=127.0.0.1:61515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:44:14,889 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:36049-0x1003cf9ab910001 connected 2024-11-14T06:44:14,894 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T06:44:14,903 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T06:44:14,907 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36049-0x1003cf9ab910001, quorum=127.0.0.1:61515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T06:44:14,912 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36049-0x1003cf9ab910001, quorum=127.0.0.1:61515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T06:44:14,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36049 2024-11-14T06:44:14,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36049 2024-11-14T06:44:14,914 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36049 2024-11-14T06:44:14,916 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36049 2024-11-14T06:44:14,916 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36049 2024-11-14T06:44:14,935 DEBUG [M:0;20680646cf8a:41891 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;20680646cf8a:41891 2024-11-14T06:44:14,937 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/20680646cf8a,41891,1731566654142 2024-11-14T06:44:14,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36049-0x1003cf9ab910001, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T06:44:14,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T06:44:14,946 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/20680646cf8a,41891,1731566654142 2024-11-14T06:44:14,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36049-0x1003cf9ab910001, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T06:44:14,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:44:14,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36049-0x1003cf9ab910001, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:44:14,975 DEBUG 
[master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T06:44:14,977 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/20680646cf8a,41891,1731566654142 from backup master directory 2024-11-14T06:44:14,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/20680646cf8a,41891,1731566654142 2024-11-14T06:44:14,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36049-0x1003cf9ab910001, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T06:44:14,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T06:44:14,981 WARN [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T06:44:14,982 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=20680646cf8a,41891,1731566654142 2024-11-14T06:44:14,984 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-14T06:44:14,986 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-14T06:44:15,054 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/hbase.id] with ID: cbbca2e1-12ab-4a6b-ace5-9061ba3e6833 2024-11-14T06:44:15,054 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/.tmp/hbase.id 2024-11-14T06:44:15,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741826_1002 (size=42) 2024-11-14T06:44:15,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741826_1002 (size=42) 2024-11-14T06:44:15,072 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/.tmp/hbase.id]:[hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/hbase.id] 2024-11-14T06:44:15,119 INFO [master/20680646cf8a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:44:15,126 INFO [master/20680646cf8a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-11-14T06:44:15,150 INFO [master/20680646cf8a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 21ms. 2024-11-14T06:44:15,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:44:15,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36049-0x1003cf9ab910001, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:44:15,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741827_1003 (size=196) 2024-11-14T06:44:15,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741827_1003 (size=196) 2024-11-14T06:44:15,197 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T06:44:15,199 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T06:44:15,207 INFO [master/20680646cf8a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T06:44:15,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741828_1004 (size=1189) 2024-11-14T06:44:15,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741828_1004 (size=1189) 2024-11-14T06:44:15,263 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/data/master/store 2024-11-14T06:44:15,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741829_1005 (size=34) 2024-11-14T06:44:15,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741829_1005 (size=34) 2024-11-14T06:44:15,291 INFO [master/20680646cf8a:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-14T06:44:15,295 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:44:15,297 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T06:44:15,297 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:44:15,297 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:44:15,299 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T06:44:15,300 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:44:15,300 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
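(Editor's note: the 'master:store' descriptor printed above lists four column families (info, proc, rs, state) with their versions, encoding, bloom filter, and block size settings. As an illustration only, the sketch below shows how the 'info' family settings from that log line map onto the public ColumnFamilyDescriptorBuilder API; the real descriptor is built internally by MasterRegion, not by code like this.)

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    // Values for the 'info' family copied from the descriptor printed above:
    // VERSIONS => '3', IN_MEMORY => 'true', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1',
    // BLOOMFILTER => 'ROWCOL', BLOCKSIZE => '8192 B (8KB)'.
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024)
            .build())
        .build();
    System.out.println(desc);
  }
}
```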
2024-11-14T06:44:15,302 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731566655297Disabling compacts and flushes for region at 1731566655297Disabling writes for close at 1731566655300 (+3 ms)Writing region close event to WAL at 1731566655300Closed at 1731566655300 2024-11-14T06:44:15,304 WARN [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/data/master/store/.initializing 2024-11-14T06:44:15,304 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/WALs/20680646cf8a,41891,1731566654142 2024-11-14T06:44:15,333 INFO [master/20680646cf8a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=20680646cf8a%2C41891%2C1731566654142, suffix=, logDir=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/WALs/20680646cf8a,41891,1731566654142, archiveDir=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/oldWALs, maxLogs=10 2024-11-14T06:44:15,345 INFO [master/20680646cf8a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C41891%2C1731566654142.1731566655340 2024-11-14T06:44:15,366 INFO [master/20680646cf8a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/WALs/20680646cf8a,41891,1731566654142/20680646cf8a%2C41891%2C1731566654142.1731566655340 2024-11-14T06:44:15,382 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38547:38547),(127.0.0.1/127.0.0.1:41679:41679)] 2024-11-14T06:44:15,387 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T06:44:15,388 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:44:15,392 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:44:15,393 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:44:15,438 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:44:15,465 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T06:44:15,469 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:44:15,472 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:44:15,472 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:44:15,476 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T06:44:15,476 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:44:15,477 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:44:15,477 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:44:15,481 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T06:44:15,481 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:44:15,482 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:44:15,483 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:44:15,487 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T06:44:15,487 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:44:15,489 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:44:15,490 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:44:15,494 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:44:15,495 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:44:15,501 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:44:15,501 DEBUG [master/20680646cf8a:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:44:15,504 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T06:44:15,509 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:44:15,518 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T06:44:15,520 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=786759, jitterRate=4.1650235652923584E-4}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T06:44:15,530 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731566655409Initializing all the Stores at 1731566655413 (+4 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566655414 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566655414Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566655415 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566655415Cleaning up temporary data from old regions at 1731566655501 (+86 ms)Region opened successfully at 1731566655530 (+29 ms) 2024-11-14T06:44:15,532 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T06:44:15,569 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f096042, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=20680646cf8a/172.17.0.2:0 2024-11-14T06:44:15,603 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T06:44:15,615 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T06:44:15,616 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T06:44:15,618 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T06:44:15,620 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-14T06:44:15,626 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 6 msec 2024-11-14T06:44:15,627 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T06:44:15,657 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T06:44:15,670 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T06:44:15,671 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T06:44:15,675 INFO [master/20680646cf8a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T06:44:15,676 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T06:44:15,677 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T06:44:15,680 INFO [master/20680646cf8a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T06:44:15,686 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T06:44:15,687 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T06:44:15,689 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T06:44:15,690 
DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T06:44:15,706 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T06:44:15,707 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T06:44:15,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36049-0x1003cf9ab910001, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T06:44:15,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T06:44:15,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36049-0x1003cf9ab910001, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:44:15,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:44:15,717 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=20680646cf8a,41891,1731566654142, sessionid=0x1003cf9ab910000, setting cluster-up flag (Was=false) 2024-11-14T06:44:15,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:44:15,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36049-0x1003cf9ab910001, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:44:15,736 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T06:44:15,737 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=20680646cf8a,41891,1731566654142 2024-11-14T06:44:15,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36049-0x1003cf9ab910001, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:44:15,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:44:15,748 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T06:44:15,750 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=20680646cf8a,41891,1731566654142 2024-11-14T06:44:15,758 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T06:44:15,820 INFO [RS:0;20680646cf8a:36049 {}] regionserver.HRegionServer(746): ClusterId : cbbca2e1-12ab-4a6b-ace5-9061ba3e6833 2024-11-14T06:44:15,822 DEBUG [RS:0;20680646cf8a:36049 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T06:44:15,827 DEBUG [RS:0;20680646cf8a:36049 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T06:44:15,827 DEBUG [RS:0;20680646cf8a:36049 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T06:44:15,830 DEBUG [RS:0;20680646cf8a:36049 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T06:44:15,831 DEBUG [RS:0;20680646cf8a:36049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5dd2cb57, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=20680646cf8a/172.17.0.2:0 2024-11-14T06:44:15,845 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T06:44:15,852 DEBUG [RS:0;20680646cf8a:36049 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;20680646cf8a:36049 2024-11-14T06:44:15,856 INFO [RS:0;20680646cf8a:36049 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T06:44:15,857 INFO [RS:0;20680646cf8a:36049 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T06:44:15,857 DEBUG [RS:0;20680646cf8a:36049 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-14T06:44:15,860 INFO [RS:0;20680646cf8a:36049 {}] regionserver.HRegionServer(2659): reportForDuty to master=20680646cf8a,41891,1731566654142 with port=36049, startcode=1731566654813 2024-11-14T06:44:15,860 INFO [master/20680646cf8a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T06:44:15,868 INFO [master/20680646cf8a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
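The "Loaded config" entry above reports the StochasticLoadBalancer settings in effect (maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000). A minimal tuning sketch follows; the hbase.master.balancer.stochastic.* key names are an assumption here, since the log prints only the resulting values, not the keys they came from.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static Configuration tuned() {
            Configuration conf = HBaseConfiguration.create();
            // Values as reported in the "Loaded config" line above; key names assumed.
            conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
            conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
            conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
            conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
            return conf;
        }
    }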
2024-11-14T06:44:15,874 DEBUG [RS:0;20680646cf8a:36049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T06:44:15,876 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 20680646cf8a,41891,1731566654142 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T06:44:15,888 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:44:15,889 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:44:15,889 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:44:15,889 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:44:15,889 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/20680646cf8a:0, corePoolSize=10, maxPoolSize=10 2024-11-14T06:44:15,890 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:44:15,890 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/20680646cf8a:0, corePoolSize=2, maxPoolSize=2 2024-11-14T06:44:15,890 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:44:15,905 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731566685905 2024-11-14T06:44:15,906 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T06:44:15,906 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T06:44:15,907 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T06:44:15,909 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T06:44:15,913 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T06:44:15,914 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T06:44:15,915 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T06:44:15,915 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T06:44:15,915 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:44:15,916 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T06:44:15,921 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
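The cleaner chores initialized here (and the HFile cleaners just below) are plugin classes read from configuration. As a hedged sketch, the usual keys are hbase.master.logcleaner.plugins and hbase.master.hfilecleaner.plugins (assumed, not taken from this log); an additional plugin would be appended to those comma-separated lists, for example:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerPluginsSketch {
        public static Configuration withExtraLogCleaner(String extraCleanerClass) {
            Configuration conf = HBaseConfiguration.create();
            // Keep whatever plugins are already configured (the log above lists the
            // defaults in use) and append one more cleaner class; key name assumed.
            String current = conf.get("hbase.master.logcleaner.plugins",
                "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner");
            conf.set("hbase.master.logcleaner.plugins", current + "," + extraCleanerClass);
            return conf;
        }
    }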
2024-11-14T06:44:15,929 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T06:44:15,931 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T06:44:15,931 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T06:44:15,950 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T06:44:15,951 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T06:44:15,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741831_1007 (size=1321) 2024-11-14T06:44:15,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741831_1007 (size=1321) 2024-11-14T06:44:15,961 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T06:44:15,962 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6 2024-11-14T06:44:15,965 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.large.0-1731566655952,5,FailOnTimeoutGroup] 2024-11-14T06:44:15,969 DEBUG 
[master/20680646cf8a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.small.0-1731566655965,5,FailOnTimeoutGroup] 2024-11-14T06:44:15,969 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T06:44:15,970 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T06:44:15,976 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T06:44:15,979 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-14T06:44:15,979 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46609, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T06:44:15,989 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41891 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 20680646cf8a,36049,1731566654813 2024-11-14T06:44:15,993 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41891 {}] master.ServerManager(517): Registering regionserver=20680646cf8a,36049,1731566654813 2024-11-14T06:44:16,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741832_1008 (size=32) 2024-11-14T06:44:16,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741832_1008 (size=32) 2024-11-14T06:44:16,008 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:44:16,012 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T06:44:16,016 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T06:44:16,016 DEBUG [RS:0;20680646cf8a:36049 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6 2024-11-14T06:44:16,016 DEBUG 
[RS:0;20680646cf8a:36049 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33705 2024-11-14T06:44:16,016 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:44:16,017 DEBUG [RS:0;20680646cf8a:36049 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T06:44:16,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T06:44:16,022 DEBUG [RS:0;20680646cf8a:36049 {}] zookeeper.ZKUtil(111): regionserver:36049-0x1003cf9ab910001, quorum=127.0.0.1:61515, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/20680646cf8a,36049,1731566654813 2024-11-14T06:44:16,022 WARN [RS:0;20680646cf8a:36049 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T06:44:16,022 INFO [RS:0;20680646cf8a:36049 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T06:44:16,022 DEBUG [RS:0;20680646cf8a:36049 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813 2024-11-14T06:44:16,025 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [20680646cf8a,36049,1731566654813] 2024-11-14T06:44:16,026 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:44:16,027 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T06:44:16,030 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T06:44:16,030 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:44:16,032 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:44:16,032 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T06:44:16,036 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T06:44:16,036 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:44:16,037 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:44:16,037 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T06:44:16,041 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T06:44:16,041 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:44:16,042 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:44:16,042 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T06:44:16,045 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/hbase/meta/1588230740 2024-11-14T06:44:16,046 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/hbase/meta/1588230740 2024-11-14T06:44:16,053 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T06:44:16,053 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T06:44:16,055 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T06:44:16,056 INFO [RS:0;20680646cf8a:36049 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T06:44:16,058 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T06:44:16,064 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T06:44:16,066 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=748997, jitterRate=-0.047601133584976196}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T06:44:16,070 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731566656009Initializing all the Stores at 1731566656011 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566656011Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566656012 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566656012Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566656012Cleaning up temporary data from old regions at 1731566656053 (+41 ms)Region opened successfully at 1731566656070 (+17 ms) 2024-11-14T06:44:16,071 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T06:44:16,071 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 
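The FlushLargeStoresPolicy fallback seen a few entries above for hbase:meta (16.0 M), and earlier for master:store (32.0 M), is simply the region's memstore flush size divided by its number of column families, used because hbase.hregion.percolumnfamilyflush.size.lower.bound is unset in both descriptors. A small sketch of that arithmetic using the values from this log:

    public class FlushLowerBoundSketch {
        // Fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset:
        // lower bound = memstore flush size / number of column families.
        static long lowerBound(long memStoreFlushSize, int familyCount) {
            return memStoreFlushSize / familyCount;
        }

        public static void main(String[] args) {
            // master:store: flushSize=134217728 (128 MB), 4 families -> 33554432 (32 MB),
            // matching FlushLargeStoresPolicy{flushSizeLowerBound=33554432} above.
            System.out.println(lowerBound(134_217_728L, 4));
            // hbase:meta: flushSizeLowerBound=16777216 (16 MB) over 4 families
            // implies a 64 MB flush size for the meta region.
            System.out.println(lowerBound(67_108_864L, 4));
        }
    }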
2024-11-14T06:44:16,071 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T06:44:16,071 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T06:44:16,071 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T06:44:16,072 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T06:44:16,072 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731566656071Disabling compacts and flushes for region at 1731566656071Disabling writes for close at 1731566656071Writing region close event to WAL at 1731566656072 (+1 ms)Closed at 1731566656072 2024-11-14T06:44:16,074 INFO [RS:0;20680646cf8a:36049 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T06:44:16,076 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T06:44:16,076 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T06:44:16,080 INFO [RS:0;20680646cf8a:36049 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T06:44:16,080 INFO [RS:0;20680646cf8a:36049 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T06:44:16,081 INFO [RS:0;20680646cf8a:36049 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T06:44:16,081 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T06:44:16,091 INFO [RS:0;20680646cf8a:36049 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T06:44:16,093 INFO [RS:0;20680646cf8a:36049 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
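The MemStoreFlusher entry above reports globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. 95% of the limit. Assuming the usual knobs (hbase.regionserver.global.memstore.size for the limit as a heap fraction, hbase.regionserver.global.memstore.size.lower.limit for the low mark, default 0.95), a minimal sketch of the relationship:

    public class GlobalMemStoreLimitSketch {
        // Assumed relationship behind the values printed by MemStoreFlusher:
        //   limit   = heapSize * hbase.regionserver.global.memstore.size
        //   lowMark = limit    * hbase.regionserver.global.memstore.size.lower.limit
        static long lowMark(long globalLimitBytes, double lowerLimitFraction) {
            return (long) (globalLimitBytes * lowerLimitFraction);
        }

        public static void main(String[] args) {
            long limit = 880L * 1024 * 1024;           // 880 M as in the log
            System.out.println(lowMark(limit, 0.95));  // ~836 M, matching the low-water mark
        }
    }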
2024-11-14T06:44:16,094 DEBUG [RS:0;20680646cf8a:36049 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:44:16,094 DEBUG [RS:0;20680646cf8a:36049 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:44:16,094 DEBUG [RS:0;20680646cf8a:36049 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:44:16,094 DEBUG [RS:0;20680646cf8a:36049 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:44:16,094 DEBUG [RS:0;20680646cf8a:36049 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:44:16,094 DEBUG [RS:0;20680646cf8a:36049 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/20680646cf8a:0, corePoolSize=2, maxPoolSize=2 2024-11-14T06:44:16,094 DEBUG [RS:0;20680646cf8a:36049 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:44:16,095 DEBUG [RS:0;20680646cf8a:36049 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:44:16,095 DEBUG [RS:0;20680646cf8a:36049 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:44:16,095 DEBUG [RS:0;20680646cf8a:36049 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:44:16,096 DEBUG [RS:0;20680646cf8a:36049 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:44:16,096 DEBUG [RS:0;20680646cf8a:36049 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:44:16,096 DEBUG [RS:0;20680646cf8a:36049 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/20680646cf8a:0, corePoolSize=3, maxPoolSize=3 2024-11-14T06:44:16,096 DEBUG [RS:0;20680646cf8a:36049 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0, corePoolSize=3, maxPoolSize=3 2024-11-14T06:44:16,098 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T06:44:16,103 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T06:44:16,104 INFO [RS:0;20680646cf8a:36049 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is 
enabled. 2024-11-14T06:44:16,105 INFO [RS:0;20680646cf8a:36049 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T06:44:16,105 INFO [RS:0;20680646cf8a:36049 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T06:44:16,105 INFO [RS:0;20680646cf8a:36049 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T06:44:16,105 INFO [RS:0;20680646cf8a:36049 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T06:44:16,106 INFO [RS:0;20680646cf8a:36049 {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,36049,1731566654813-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T06:44:16,132 INFO [RS:0;20680646cf8a:36049 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T06:44:16,135 INFO [RS:0;20680646cf8a:36049 {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,36049,1731566654813-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T06:44:16,135 INFO [RS:0;20680646cf8a:36049 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:44:16,135 INFO [RS:0;20680646cf8a:36049 {}] regionserver.Replication(171): 20680646cf8a,36049,1731566654813 started 2024-11-14T06:44:16,158 INFO [RS:0;20680646cf8a:36049 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:44:16,159 INFO [RS:0;20680646cf8a:36049 {}] regionserver.HRegionServer(1482): Serving as 20680646cf8a,36049,1731566654813, RpcServer on 20680646cf8a/172.17.0.2:36049, sessionid=0x1003cf9ab910001 2024-11-14T06:44:16,160 DEBUG [RS:0;20680646cf8a:36049 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T06:44:16,160 DEBUG [RS:0;20680646cf8a:36049 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 20680646cf8a,36049,1731566654813 2024-11-14T06:44:16,160 DEBUG [RS:0;20680646cf8a:36049 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '20680646cf8a,36049,1731566654813' 2024-11-14T06:44:16,160 DEBUG [RS:0;20680646cf8a:36049 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T06:44:16,161 DEBUG [RS:0;20680646cf8a:36049 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T06:44:16,163 DEBUG [RS:0;20680646cf8a:36049 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T06:44:16,163 DEBUG [RS:0;20680646cf8a:36049 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T06:44:16,163 DEBUG [RS:0;20680646cf8a:36049 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 20680646cf8a,36049,1731566654813 2024-11-14T06:44:16,163 DEBUG [RS:0;20680646cf8a:36049 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '20680646cf8a,36049,1731566654813' 2024-11-14T06:44:16,163 DEBUG [RS:0;20680646cf8a:36049 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 
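At this point the region server is serving and registering its procedure members under the ZooKeeper ensemble this test uses (quorum 127.0.0.1:61515, base znode /hbase, as printed by ZKWatcher throughout). For reference, a client outside the test harness would reach this mini-cluster by pointing the standard connection properties at that quorum; a hedged sketch using only public client API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class MiniClusterClientSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Quorum host, client port and base znode as printed by ZKWatcher above.
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");
            conf.set("hbase.zookeeper.property.clientPort", "61515");
            conf.set("zookeeper.znode.parent", "/hbase");
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
            }
        }
    }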
2024-11-14T06:44:16,165 DEBUG [RS:0;20680646cf8a:36049 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T06:44:16,166 DEBUG [RS:0;20680646cf8a:36049 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T06:44:16,166 INFO [RS:0;20680646cf8a:36049 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T06:44:16,167 INFO [RS:0;20680646cf8a:36049 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T06:44:16,255 WARN [20680646cf8a:41891 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-14T06:44:16,274 INFO [RS:0;20680646cf8a:36049 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=20680646cf8a%2C36049%2C1731566654813, suffix=, logDir=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813, archiveDir=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/oldWALs, maxLogs=32 2024-11-14T06:44:16,276 INFO [RS:0;20680646cf8a:36049 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C36049%2C1731566654813.1731566656276 2024-11-14T06:44:16,292 INFO [RS:0;20680646cf8a:36049 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566656276 2024-11-14T06:44:16,294 DEBUG [RS:0;20680646cf8a:36049 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41679:41679),(127.0.0.1/127.0.0.1:38547:38547)] 2024-11-14T06:44:16,508 DEBUG [20680646cf8a:41891 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T06:44:16,523 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=20680646cf8a,36049,1731566654813 2024-11-14T06:44:16,530 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 20680646cf8a,36049,1731566654813, state=OPENING 2024-11-14T06:44:16,536 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T06:44:16,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36049-0x1003cf9ab910001, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:44:16,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:44:16,539 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:44:16,539 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:44:16,540 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, 
ASSIGN 2024-11-14T06:44:16,542 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=20680646cf8a,36049,1731566654813}] 2024-11-14T06:44:16,719 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T06:44:16,723 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58541, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T06:44:16,734 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T06:44:16,735 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T06:44:16,739 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=20680646cf8a%2C36049%2C1731566654813.meta, suffix=.meta, logDir=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813, archiveDir=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/oldWALs, maxLogs=32 2024-11-14T06:44:16,742 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C36049%2C1731566654813.meta.1731566656742.meta 2024-11-14T06:44:16,754 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.meta.1731566656742.meta 2024-11-14T06:44:16,756 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41679:41679),(127.0.0.1/127.0.0.1:38547:38547)] 2024-11-14T06:44:16,758 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T06:44:16,760 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T06:44:16,763 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T06:44:16,770 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-14T06:44:16,774 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T06:44:16,774 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:44:16,774 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T06:44:16,775 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T06:44:16,778 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T06:44:16,779 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T06:44:16,779 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:44:16,780 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:44:16,780 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T06:44:16,782 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T06:44:16,782 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:44:16,783 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:44:16,783 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T06:44:16,785 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T06:44:16,785 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:44:16,786 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:44:16,786 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T06:44:16,788 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T06:44:16,788 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:44:16,789 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
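
Editorial sketch: the CompactionConfiguration lines above print the effective compaction settings (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000) for each column family of hbase:meta. As a hedged illustration only, these values are normally driven by the standard hbase.hstore.compaction.* keys; the mapping below is my reading of the log, not configuration taken from the test itself:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);         // minFilesToCompact in the log
        conf.setInt("hbase.hstore.compaction.max", 10);        // maxFilesToCompact in the log
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);  // compaction ratio in the log
        System.out.println(conf.get("hbase.hstore.compaction.min"));
      }
    }
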
2024-11-14T06:44:16,790 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T06:44:16,793 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/hbase/meta/1588230740 2024-11-14T06:44:16,796 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/hbase/meta/1588230740 2024-11-14T06:44:16,800 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T06:44:16,800 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T06:44:16,801 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T06:44:16,804 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T06:44:16,806 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=779341, jitterRate=-0.009017258882522583}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T06:44:16,807 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T06:44:16,808 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731566656775Writing region info on filesystem at 1731566656775Initializing all the Stores at 1731566656777 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566656777Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566656777Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566656777Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566656777Cleaning up temporary data from old regions at 1731566656800 (+23 ms)Running coprocessor post-open hooks at 1731566656807 (+7 ms)Region opened successfully at 1731566656808 (+1 ms) 2024-11-14T06:44:16,817 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731566656708 2024-11-14T06:44:16,831 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T06:44:16,832 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T06:44:16,833 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=20680646cf8a,36049,1731566654813 2024-11-14T06:44:16,835 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 20680646cf8a,36049,1731566654813, state=OPEN 2024-11-14T06:44:16,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T06:44:16,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36049-0x1003cf9ab910001, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T06:44:16,839 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:44:16,839 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:44:16,839 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=20680646cf8a,36049,1731566654813 2024-11-14T06:44:16,847 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T06:44:16,847 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=20680646cf8a,36049,1731566654813 in 298 msec 2024-11-14T06:44:16,856 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T06:44:16,856 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 768 msec 2024-11-14T06:44:16,857 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T06:44:16,857 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T06:44:16,879 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T06:44:16,881 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=20680646cf8a,36049,1731566654813, seqNum=-1] 2024-11-14T06:44:16,910 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T06:44:16,912 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52897, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T06:44:16,935 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1390 sec 2024-11-14T06:44:16,935 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731566656935, completionTime=-1 2024-11-14T06:44:16,939 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T06:44:16,940 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-14T06:44:16,973 INFO [master/20680646cf8a:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-14T06:44:16,973 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731566716973 2024-11-14T06:44:16,973 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731566776973 2024-11-14T06:44:16,973 INFO [master/20680646cf8a:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 33 msec 2024-11-14T06:44:16,980 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,41891,1731566654142-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T06:44:16,980 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,41891,1731566654142-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:44:16,981 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,41891,1731566654142-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:44:16,983 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-20680646cf8a:41891, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T06:44:16,983 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T06:44:16,984 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T06:44:16,989 DEBUG [master/20680646cf8a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T06:44:17,016 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.034sec 2024-11-14T06:44:17,018 INFO [master/20680646cf8a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T06:44:17,019 INFO [master/20680646cf8a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T06:44:17,020 INFO [master/20680646cf8a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T06:44:17,021 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-14T06:44:17,021 INFO [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T06:44:17,022 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,41891,1731566654142-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T06:44:17,023 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,41891,1731566654142-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T06:44:17,032 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64202c07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T06:44:17,033 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T06:44:17,034 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T06:44:17,035 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-14T06:44:17,035 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,41891,1731566654142-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
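
Editorial sketch: the entries that follow (ClusterIdFetcher, connection-registry lookup, meta region location fetch) are the client half of opening a Connection once the master reports initialization complete. A minimal sketch of that public client API, assuming a ZooKeeper-quorum based client and reusing the 127.0.0.1:61515 quorum reported earlier in the log; the test itself may wire its connection differently:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientConnectionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 61515);
        // createConnection() drives the cluster-id, registry, and meta-location fetches
        // that appear as ClusterIdFetcher / ConnectionUtils DEBUG lines in the log.
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          System.out.println("meta available: " + admin.tableExists(TableName.META_TABLE_NAME));
        }
      }
    }
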
2024-11-14T06:44:17,035 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-14T06:44:17,039 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 20680646cf8a,41891,-1 for getting cluster id 2024-11-14T06:44:17,042 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T06:44:17,051 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'cbbca2e1-12ab-4a6b-ace5-9061ba3e6833' 2024-11-14T06:44:17,054 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T06:44:17,054 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "cbbca2e1-12ab-4a6b-ace5-9061ba3e6833" 2024-11-14T06:44:17,056 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24713418, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T06:44:17,056 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [20680646cf8a,41891,-1] 2024-11-14T06:44:17,059 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T06:44:17,061 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:44:17,063 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51512, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T06:44:17,066 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@717eca08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T06:44:17,067 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T06:44:17,076 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=20680646cf8a,36049,1731566654813, seqNum=-1] 2024-11-14T06:44:17,076 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T06:44:17,079 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33932, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T06:44:17,104 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=20680646cf8a,41891,1731566654142 2024-11-14T06:44:17,105 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:44:17,116 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T06:44:17,120 DEBUG 
[Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-14T06:44:17,128 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 20680646cf8a,41891,1731566654142 2024-11-14T06:44:17,131 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2e192e5e 2024-11-14T06:44:17,133 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-14T06:44:17,137 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51522, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-14T06:44:17,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41891 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-14T06:44:17,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41891 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-14T06:44:17,153 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41891 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T06:44:17,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41891 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-14T06:44:17,166 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-14T06:44:17,168 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41891 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-14T06:44:17,169 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:44:17,174 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-14T06:44:17,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41891 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T06:44:17,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741835_1011 (size=389) 2024-11-14T06:44:17,262 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741835_1011 (size=389) 2024-11-14T06:44:17,270 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d9de73af852b586b02bc22d70a1925cf, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6 2024-11-14T06:44:17,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741836_1012 (size=72) 2024-11-14T06:44:17,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741836_1012 (size=72) 2024-11-14T06:44:17,299 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:44:17,299 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing d9de73af852b586b02bc22d70a1925cf, disabling compactions & flushes 2024-11-14T06:44:17,299 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf. 2024-11-14T06:44:17,299 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf. 2024-11-14T06:44:17,299 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf. after waiting 0 ms 2024-11-14T06:44:17,299 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf. 2024-11-14T06:44:17,299 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf. 
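
Editorial sketch: the CreateTableProcedure above originates from a client create-table request whose descriptor carries the deliberately small MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) values that TableDescriptorChecker warns about. A rough reconstruction with the public Admin/TableDescriptorBuilder API, not the test's actual source:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTableSketch {
      // Assumes an already-open Connection (see the earlier connection sketch).
      static void createTestTable(Connection conn) throws IOException {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setMaxFileSize(786432L)      // value behind the MAX_FILESIZE "too small" warning
            .setMemStoreFlushSize(8192L)  // value behind the MEMSTORE_FLUSHSIZE "too small" warning
            .build();
        try (Admin admin = conn.getAdmin()) {
          admin.createTable(td);          // surfaces as CreateTableProcedure pid=4 above
        }
      }
    }
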
2024-11-14T06:44:17,299 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for d9de73af852b586b02bc22d70a1925cf: Waiting for close lock at 1731566657299Disabling compacts and flushes for region at 1731566657299Disabling writes for close at 1731566657299Writing region close event to WAL at 1731566657299Closed at 1731566657299 2024-11-14T06:44:17,302 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-14T06:44:17,308 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731566657303"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731566657303"}]},"ts":"1731566657303"} 2024-11-14T06:44:17,316 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-14T06:44:17,318 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-14T06:44:17,321 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731566657318"}]},"ts":"1731566657318"} 2024-11-14T06:44:17,326 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-14T06:44:17,328 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=d9de73af852b586b02bc22d70a1925cf, ASSIGN}] 2024-11-14T06:44:17,331 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=d9de73af852b586b02bc22d70a1925cf, ASSIGN 2024-11-14T06:44:17,333 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=d9de73af852b586b02bc22d70a1925cf, ASSIGN; state=OFFLINE, location=20680646cf8a,36049,1731566654813; forceNewPlan=false, retain=false 2024-11-14T06:44:17,485 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d9de73af852b586b02bc22d70a1925cf, regionState=OPENING, regionLocation=20680646cf8a,36049,1731566654813 2024-11-14T06:44:17,493 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=d9de73af852b586b02bc22d70a1925cf, ASSIGN because future has completed 2024-11-14T06:44:17,494 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, 
state=RUNNABLE, hasLock=false; OpenRegionProcedure d9de73af852b586b02bc22d70a1925cf, server=20680646cf8a,36049,1731566654813}] 2024-11-14T06:44:17,659 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf. 2024-11-14T06:44:17,659 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => d9de73af852b586b02bc22d70a1925cf, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf.', STARTKEY => '', ENDKEY => ''} 2024-11-14T06:44:17,660 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling d9de73af852b586b02bc22d70a1925cf 2024-11-14T06:44:17,660 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:44:17,660 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for d9de73af852b586b02bc22d70a1925cf 2024-11-14T06:44:17,660 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for d9de73af852b586b02bc22d70a1925cf 2024-11-14T06:44:17,663 INFO [StoreOpener-d9de73af852b586b02bc22d70a1925cf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d9de73af852b586b02bc22d70a1925cf 2024-11-14T06:44:17,665 INFO [StoreOpener-d9de73af852b586b02bc22d70a1925cf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d9de73af852b586b02bc22d70a1925cf columnFamilyName info 2024-11-14T06:44:17,665 DEBUG [StoreOpener-d9de73af852b586b02bc22d70a1925cf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:44:17,667 INFO [StoreOpener-d9de73af852b586b02bc22d70a1925cf-1 {}] regionserver.HStore(327): Store=d9de73af852b586b02bc22d70a1925cf/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:44:17,667 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegion(1038): replaying wal for d9de73af852b586b02bc22d70a1925cf 2024-11-14T06:44:17,668 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf 2024-11-14T06:44:17,669 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf 2024-11-14T06:44:17,670 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for d9de73af852b586b02bc22d70a1925cf 2024-11-14T06:44:17,670 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for d9de73af852b586b02bc22d70a1925cf 2024-11-14T06:44:17,672 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for d9de73af852b586b02bc22d70a1925cf 2024-11-14T06:44:17,676 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T06:44:17,677 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened d9de73af852b586b02bc22d70a1925cf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=787997, jitterRate=0.001990199089050293}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T06:44:17,677 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d9de73af852b586b02bc22d70a1925cf 2024-11-14T06:44:17,678 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for d9de73af852b586b02bc22d70a1925cf: Running coprocessor pre-open hook at 1731566657661Writing region info on filesystem at 1731566657661Initializing all the Stores at 1731566657662 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566657662Cleaning up temporary data from old regions at 1731566657670 (+8 ms)Running coprocessor post-open hooks at 1731566657677 (+7 ms)Region opened successfully at 1731566657678 (+1 ms) 2024-11-14T06:44:17,681 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf., pid=6, 
masterSystemTime=1731566657649 2024-11-14T06:44:17,685 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf. 2024-11-14T06:44:17,686 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf. 2024-11-14T06:44:17,695 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d9de73af852b586b02bc22d70a1925cf, regionState=OPEN, openSeqNum=2, regionLocation=20680646cf8a,36049,1731566654813 2024-11-14T06:44:17,701 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d9de73af852b586b02bc22d70a1925cf, server=20680646cf8a,36049,1731566654813 because future has completed 2024-11-14T06:44:17,712 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-14T06:44:17,712 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure d9de73af852b586b02bc22d70a1925cf, server=20680646cf8a,36049,1731566654813 in 212 msec 2024-11-14T06:44:17,717 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-14T06:44:17,717 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=d9de73af852b586b02bc22d70a1925cf, ASSIGN in 384 msec 2024-11-14T06:44:17,719 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-14T06:44:17,720 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731566657720"}]},"ts":"1731566657720"} 2024-11-14T06:44:17,726 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-14T06:44:17,728 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-14T06:44:17,733 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 572 msec 2024-11-14T06:44:22,279 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-14T06:44:22,330 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T06:44:22,332 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-14T06:44:24,529 DEBUG [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T06:44:24,529 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-14T06:44:24,531 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-14T06:44:24,531 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-14T06:44:24,532 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T06:44:24,532 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-14T06:44:24,532 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-14T06:44:24,532 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-14T06:44:27,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41891 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T06:44:27,266 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-14T06:44:27,269 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-14T06:44:27,279 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-14T06:44:27,280 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf. 
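
Editorial sketch: the flush reported below (dataSize=7.36 KB against the 8 KB memstore flush size) follows a handful of roughly 1 KB puts starting at row0001, as suggested by the flushed key row0001/info:/... . A hedged reconstruction of such a write loop with the public Table API; the row naming, value size, and count are inferred from the log, not taken from the test code:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteRowsSketch {
      // Writes enough ~1 KB cells to push the 8 KB memstore past its flush threshold,
      // producing a flush like the MemStoreFlusher entries that follow.
      static void writeRows(Connection conn) throws IOException {
        try (Table table = conn.getTable(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))) {
          byte[] value = new byte[1024];
          for (int i = 1; i <= 7; i++) {
            Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
            // Empty qualifier, matching the flushed key "row0001/info:/..."
            put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), value);
            table.put(put);
          }
        }
      }
    }
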
2024-11-14T06:44:27,281 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C36049%2C1731566654813.1731566667281 2024-11-14T06:44:27,290 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:44:27,291 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:44:27,291 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:44:27,291 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:44:27,291 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:44:27,292 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566656276 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566667281 2024-11-14T06:44:27,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741833_1009 (size=451) 2024-11-14T06:44:27,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741833_1009 (size=451) 2024-11-14T06:44:27,309 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41679:41679),(127.0.0.1/127.0.0.1:38547:38547)] 2024-11-14T06:44:27,310 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566656276 to hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/oldWALs/20680646cf8a%2C36049%2C1731566654813.1731566656276 2024-11-14T06:44:27,318 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf., hostname=20680646cf8a,36049,1731566654813, seqNum=2] 2024-11-14T06:44:39,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36049 {}] regionserver.HRegion(8855): Flush requested on d9de73af852b586b02bc22d70a1925cf 2024-11-14T06:44:39,363 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d9de73af852b586b02bc22d70a1925cf 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T06:44:39,419 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/.tmp/info/72645344a9794bf9ac85e2b59d87a0f5 is 1080, key is row0001/info:/1731566667322/Put/seqid=0 2024-11-14T06:44:39,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741838_1014 (size=12509) 2024-11-14T06:44:39,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741838_1014 (size=12509) 2024-11-14T06:44:39,430 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/.tmp/info/72645344a9794bf9ac85e2b59d87a0f5 2024-11-14T06:44:39,480 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/.tmp/info/72645344a9794bf9ac85e2b59d87a0f5 as hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/72645344a9794bf9ac85e2b59d87a0f5 2024-11-14T06:44:39,491 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/72645344a9794bf9ac85e2b59d87a0f5, entries=7, sequenceid=11, filesize=12.2 K 2024-11-14T06:44:39,499 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for d9de73af852b586b02bc22d70a1925cf in 138ms, sequenceid=11, compaction requested=false 2024-11-14T06:44:39,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d9de73af852b586b02bc22d70a1925cf: 2024-11-14T06:44:43,117 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T06:44:47,382 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C36049%2C1731566654813.1731566687381 2024-11-14T06:44:47,600 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 212 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK], DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK]] 2024-11-14T06:44:47,600 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:44:47,600 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:44:47,600 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:44:47,600 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:44:47,600 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:44:47,601 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566667281 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566687381 2024-11-14T06:44:47,602 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38547:38547),(127.0.0.1/127.0.0.1:41679:41679)] 2024-11-14T06:44:47,602 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566667281 is not closed yet, will try archiving it next time 2024-11-14T06:44:47,603 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741837_1013 (size=12399) 2024-11-14T06:44:47,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741837_1013 (size=12399) 2024-11-14T06:44:47,806 INFO [FSHLog-0-hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6-prefix:20680646cf8a,36049,1731566654813 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK], DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK]] 2024-11-14T06:44:50,010 INFO [FSHLog-0-hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6-prefix:20680646cf8a,36049,1731566654813 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK], DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK]] 2024-11-14T06:44:52,214 INFO [FSHLog-0-hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6-prefix:20680646cf8a,36049,1731566654813 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK], DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK]] 2024-11-14T06:44:54,418 INFO [FSHLog-0-hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6-prefix:20680646cf8a,36049,1731566654813 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK], DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK]] 2024-11-14T06:44:54,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36049 {}] regionserver.HRegion(8855): Flush requested on d9de73af852b586b02bc22d70a1925cf 2024-11-14T06:44:54,419 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d9de73af852b586b02bc22d70a1925cf 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T06:44:54,621 INFO [FSHLog-0-hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6-prefix:20680646cf8a,36049,1731566654813 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK], DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK]] 2024-11-14T06:44:54,626 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/.tmp/info/e4e051d32b274c839b592e2d510ffa38 is 1080, key is row0008/info:/1731566681360/Put/seqid=0 2024-11-14T06:44:54,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741840_1016 (size=12509) 2024-11-14T06:44:54,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741840_1016 (size=12509) 2024-11-14T06:44:54,639 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at 
sequenceid=21 (bloomFilter=true), to=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/.tmp/info/e4e051d32b274c839b592e2d510ffa38 2024-11-14T06:44:54,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/.tmp/info/e4e051d32b274c839b592e2d510ffa38 as hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/e4e051d32b274c839b592e2d510ffa38 2024-11-14T06:44:54,665 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/e4e051d32b274c839b592e2d510ffa38, entries=7, sequenceid=21, filesize=12.2 K 2024-11-14T06:44:54,867 INFO [FSHLog-0-hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6-prefix:20680646cf8a,36049,1731566654813 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK], DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK]] 2024-11-14T06:44:54,867 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for d9de73af852b586b02bc22d70a1925cf in 449ms, sequenceid=21, compaction requested=false 2024-11-14T06:44:54,867 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d9de73af852b586b02bc22d70a1925cf: 2024-11-14T06:44:54,868 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-14T06:44:54,868 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:44:54,869 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/72645344a9794bf9ac85e2b59d87a0f5 because midkey is the same as first or last row 2024-11-14T06:44:56,622 INFO [FSHLog-0-hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6-prefix:20680646cf8a,36049,1731566654813 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK], DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK]] 2024-11-14T06:44:57,038 INFO [master/20680646cf8a:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-14T06:44:57,038 INFO [master/20680646cf8a:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-14T06:44:58,827 INFO [FSHLog-0-hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6-prefix:20680646cf8a,36049,1731566654813 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK], DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK]] 2024-11-14T06:44:58,830 WARN [FSHLog-0-hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6-prefix:20680646cf8a,36049,1731566654813 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK], DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK]] 2024-11-14T06:44:58,831 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 20680646cf8a%2C36049%2C1731566654813:(num 1731566687381) roll requested 2024-11-14T06:44:58,832 INFO [regionserver/20680646cf8a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C36049%2C1731566654813.1731566698832 2024-11-14T06:44:59,046 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 210 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK], DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK]] 2024-11-14T06:44:59,046 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:44:59,046 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:44:59,047 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:44:59,047 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:44:59,047 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:44:59,048 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566687381 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566698832 2024-11-14T06:44:59,049 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38547:38547),(127.0.0.1/127.0.0.1:41679:41679)] 2024-11-14T06:44:59,049 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566687381 is not closed yet, will try archiving it next time 2024-11-14T06:44:59,049 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566667281 to hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/oldWALs/20680646cf8a%2C36049%2C1731566654813.1731566667281 2024-11-14T06:44:59,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741839_1015 (size=7739) 2024-11-14T06:44:59,051 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741839_1015 (size=7739) 2024-11-14T06:45:01,032 INFO [FSHLog-0-hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6-prefix:20680646cf8a,36049,1731566654813 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK], DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK]] 2024-11-14T06:45:02,661 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d9de73af852b586b02bc22d70a1925cf, had cached 0 bytes from a total of 25018 2024-11-14T06:45:03,237 INFO [FSHLog-0-hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6-prefix:20680646cf8a,36049,1731566654813 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK], DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK]] 2024-11-14T06:45:05,441 INFO [FSHLog-0-hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6-prefix:20680646cf8a,36049,1731566654813 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK], DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK]] 2024-11-14T06:45:07,648 INFO [FSHLog-0-hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6-prefix:20680646cf8a,36049,1731566654813 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK], DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK]] 2024-11-14T06:45:09,652 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-14T06:45:09,653 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C36049%2C1731566654813.1731566709653 2024-11-14T06:45:13,117 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-14T06:45:14,666 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5007 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK], DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK]] 2024-11-14T06:45:14,669 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5007 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK], DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK]] 2024-11-14T06:45:14,669 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 20680646cf8a%2C36049%2C1731566654813:(num 1731566709653) roll requested 2024-11-14T06:45:14,669 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:14,670 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:14,670 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:14,670 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:14,670 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:14,671 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566698832 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566709653 2024-11-14T06:45:14,673 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38547:38547),(127.0.0.1/127.0.0.1:41679:41679)] 2024-11-14T06:45:14,673 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566698832 is not closed yet, will try archiving it next time 2024-11-14T06:45:14,673 INFO [regionserver/20680646cf8a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C36049%2C1731566654813.1731566714673 2024-11-14T06:45:14,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741841_1017 (size=4753) 2024-11-14T06:45:14,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741841_1017 (size=4753) 2024-11-14T06:45:19,678 INFO [FSHLog-0-hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6-prefix:20680646cf8a,36049,1731566654813 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK], DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK]] 2024-11-14T06:45:19,678 WARN [FSHLog-0-hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6-prefix:20680646cf8a,36049,1731566654813 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK], 
DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK]] 2024-11-14T06:45:19,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36049 {}] regionserver.HRegion(8855): Flush requested on d9de73af852b586b02bc22d70a1925cf 2024-11-14T06:45:19,678 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d9de73af852b586b02bc22d70a1925cf 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T06:45:19,688 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK], DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK]] 2024-11-14T06:45:19,688 WARN [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK], DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK]] 2024-11-14T06:45:21,680 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-14T06:45:24,682 INFO [FSHLog-0-hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6-prefix:20680646cf8a,36049,1731566654813 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK], DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK]] 2024-11-14T06:45:24,683 WARN [FSHLog-0-hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6-prefix:20680646cf8a,36049,1731566654813 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK], DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK]] 2024-11-14T06:45:24,683 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:24,684 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:24,684 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:24,685 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:24,685 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:24,686 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566709653 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566714673 2024-11-14T06:45:24,688 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38547:38547),(127.0.0.1/127.0.0.1:41679:41679)] 2024-11-14T06:45:24,689 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566709653 is not closed yet, will try archiving it next time 2024-11-14T06:45:24,689 
DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 20680646cf8a%2C36049%2C1731566654813:(num 1731566714673) roll requested 2024-11-14T06:45:24,690 INFO [regionserver/20680646cf8a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C36049%2C1731566654813.1731566724689 2024-11-14T06:45:24,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741842_1018 (size=1569) 2024-11-14T06:45:24,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741842_1018 (size=1569) 2024-11-14T06:45:24,693 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/.tmp/info/7c73ec10de9948309b2ddada6b97aa5c is 1080, key is row0015/info:/1731566696421/Put/seqid=0 2024-11-14T06:45:24,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741844_1020 (size=12509) 2024-11-14T06:45:24,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741844_1020 (size=12509) 2024-11-14T06:45:24,701 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/.tmp/info/7c73ec10de9948309b2ddada6b97aa5c 2024-11-14T06:45:24,710 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/.tmp/info/7c73ec10de9948309b2ddada6b97aa5c as hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/7c73ec10de9948309b2ddada6b97aa5c 2024-11-14T06:45:24,720 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/7c73ec10de9948309b2ddada6b97aa5c, entries=7, sequenceid=31, filesize=12.2 K 2024-11-14T06:45:29,704 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5010 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK], DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK]] 2024-11-14T06:45:29,704 WARN [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5010 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK], DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK]] 2024-11-14T06:45:29,722 INFO [FSHLog-0-hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6-prefix:20680646cf8a,36049,1731566654813 {}] wal.AbstractFSWAL(1368): 
Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK], DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK]] 2024-11-14T06:45:29,722 WARN [FSHLog-0-hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6-prefix:20680646cf8a,36049,1731566654813 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43335,DS-f95ded65-4dd4-4418-9437-20f9aec080da,DISK], DatanodeInfoWithStorage[127.0.0.1:38983,DS-0bbd120e-c309-4bc1-ab39-f74f7cf41f65,DISK]] 2024-11-14T06:45:29,722 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for d9de73af852b586b02bc22d70a1925cf in 10044ms, sequenceid=31, compaction requested=true 2024-11-14T06:45:29,723 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:29,723 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d9de73af852b586b02bc22d70a1925cf: 2024-11-14T06:45:29,723 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:29,723 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-14T06:45:29,723 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:45:29,723 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:29,723 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/72645344a9794bf9ac85e2b59d87a0f5 because midkey is the same as first or last row 2024-11-14T06:45:29,724 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:29,724 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:29,724 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566714673 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566724689 2024-11-14T06:45:29,726 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41679:41679),(127.0.0.1/127.0.0.1:38547:38547)] 2024-11-14T06:45:29,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9de73af852b586b02bc22d70a1925cf:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T06:45:29,727 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566714673 is not closed yet, will try archiving it next time 2024-11-14T06:45:29,727 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566687381 to hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/oldWALs/20680646cf8a%2C36049%2C1731566654813.1731566687381 2024-11-14T06:45:29,727 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 20680646cf8a%2C36049%2C1731566654813:(num 1731566729727) roll requested 2024-11-14T06:45:29,728 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C36049%2C1731566654813.1731566729727 2024-11-14T06:45:29,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741843_1019 (size=438) 2024-11-14T06:45:29,730 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566698832 to hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/oldWALs/20680646cf8a%2C36049%2C1731566654813.1731566698832 2024-11-14T06:45:29,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:45:29,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741843_1019 (size=438) 2024-11-14T06:45:29,731 DEBUG [RS:0;20680646cf8a:36049-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T06:45:29,732 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566709653 to hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/oldWALs/20680646cf8a%2C36049%2C1731566654813.1731566709653 2024-11-14T06:45:29,733 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566714673 to hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/oldWALs/20680646cf8a%2C36049%2C1731566654813.1731566714673 2024-11-14T06:45:29,734 DEBUG [RS:0;20680646cf8a:36049-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T06:45:29,735 DEBUG [RS:0;20680646cf8a:36049-shortCompactions-0 {}] regionserver.HStore(1541): d9de73af852b586b02bc22d70a1925cf/info is initiating minor compaction (all files) 2024-11-14T06:45:29,736 INFO [RS:0;20680646cf8a:36049-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of d9de73af852b586b02bc22d70a1925cf/info in TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf. 
2024-11-14T06:45:29,736 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:29,736 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:29,736 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:29,736 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:29,736 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:29,736 INFO [RS:0;20680646cf8a:36049-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/72645344a9794bf9ac85e2b59d87a0f5, hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/e4e051d32b274c839b592e2d510ffa38, hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/7c73ec10de9948309b2ddada6b97aa5c] into tmpdir=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/.tmp, totalSize=36.6 K 2024-11-14T06:45:29,736 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566724689 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566729727 2024-11-14T06:45:29,737 DEBUG [RS:0;20680646cf8a:36049-shortCompactions-0 {}] compactions.Compactor(225): Compacting 72645344a9794bf9ac85e2b59d87a0f5, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731566667322 2024-11-14T06:45:29,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741845_1021 (size=93) 2024-11-14T06:45:29,739 DEBUG [RS:0;20680646cf8a:36049-shortCompactions-0 {}] compactions.Compactor(225): Compacting e4e051d32b274c839b592e2d510ffa38, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731566681360 2024-11-14T06:45:29,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741845_1021 (size=93) 2024-11-14T06:45:29,739 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566724689 to hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/oldWALs/20680646cf8a%2C36049%2C1731566654813.1731566724689 2024-11-14T06:45:29,739 DEBUG [RS:0;20680646cf8a:36049-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7c73ec10de9948309b2ddada6b97aa5c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731566696421 2024-11-14T06:45:29,745 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38547:38547),(127.0.0.1/127.0.0.1:41679:41679)] 2024-11-14T06:45:29,745 INFO 
[regionserver/20680646cf8a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C36049%2C1731566654813.1731566729745 2024-11-14T06:45:29,754 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:29,754 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:29,754 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:29,754 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:29,755 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:29,755 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566729727 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/WALs/20680646cf8a,36049,1731566654813/20680646cf8a%2C36049%2C1731566654813.1731566729745 2024-11-14T06:45:29,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741846_1022 (size=1258) 2024-11-14T06:45:29,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741846_1022 (size=1258) 2024-11-14T06:45:29,762 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41679:41679),(127.0.0.1/127.0.0.1:38547:38547)] 2024-11-14T06:45:29,771 INFO [RS:0;20680646cf8a:36049-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9de73af852b586b02bc22d70a1925cf#info#compaction#3 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T06:45:29,772 DEBUG [RS:0;20680646cf8a:36049-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/.tmp/info/1b26715228e944939cb782f5dc4e3d3d is 1080, key is row0001/info:/1731566667322/Put/seqid=0 2024-11-14T06:45:29,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741848_1024 (size=27710) 2024-11-14T06:45:29,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741848_1024 (size=27710) 2024-11-14T06:45:29,788 DEBUG [RS:0;20680646cf8a:36049-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/.tmp/info/1b26715228e944939cb782f5dc4e3d3d as hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/1b26715228e944939cb782f5dc4e3d3d 2024-11-14T06:45:29,803 INFO [RS:0;20680646cf8a:36049-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in d9de73af852b586b02bc22d70a1925cf/info of d9de73af852b586b02bc22d70a1925cf into 1b26715228e944939cb782f5dc4e3d3d(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-14T06:45:29,803 DEBUG [RS:0;20680646cf8a:36049-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d9de73af852b586b02bc22d70a1925cf: 2024-11-14T06:45:29,804 INFO [RS:0;20680646cf8a:36049-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf., storeName=d9de73af852b586b02bc22d70a1925cf/info, priority=13, startTime=1731566729726; duration=0sec 2024-11-14T06:45:29,804 DEBUG [RS:0;20680646cf8a:36049-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-14T06:45:29,804 DEBUG [RS:0;20680646cf8a:36049-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:45:29,804 DEBUG [RS:0;20680646cf8a:36049-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/1b26715228e944939cb782f5dc4e3d3d because midkey is the same as first or last row 2024-11-14T06:45:29,805 DEBUG [RS:0;20680646cf8a:36049-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-14T06:45:29,805 DEBUG [RS:0;20680646cf8a:36049-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:45:29,805 DEBUG [RS:0;20680646cf8a:36049-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/1b26715228e944939cb782f5dc4e3d3d because midkey is the same as first or last row 2024-11-14T06:45:29,805 DEBUG [RS:0;20680646cf8a:36049-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-14T06:45:29,805 DEBUG [RS:0;20680646cf8a:36049-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:45:29,805 DEBUG [RS:0;20680646cf8a:36049-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/1b26715228e944939cb782f5dc4e3d3d because midkey is the same as first or last row 2024-11-14T06:45:29,805 DEBUG [RS:0;20680646cf8a:36049-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:45:29,806 DEBUG [RS:0;20680646cf8a:36049-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9de73af852b586b02bc22d70a1925cf:info 2024-11-14T06:45:41,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36049 {}] regionserver.HRegion(8855): Flush requested on d9de73af852b586b02bc22d70a1925cf 2024-11-14T06:45:41,790 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d9de73af852b586b02bc22d70a1925cf 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T06:45:41,801 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/.tmp/info/059fe89f32dd4a3bb7f91f3d960b2f5b is 1080, key is row0022/info:/1731566729747/Put/seqid=0 2024-11-14T06:45:41,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741849_1025 (size=12509) 2024-11-14T06:45:41,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741849_1025 (size=12509) 2024-11-14T06:45:41,809 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/.tmp/info/059fe89f32dd4a3bb7f91f3d960b2f5b 2024-11-14T06:45:41,819 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/.tmp/info/059fe89f32dd4a3bb7f91f3d960b2f5b as hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/059fe89f32dd4a3bb7f91f3d960b2f5b 2024-11-14T06:45:41,830 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/059fe89f32dd4a3bb7f91f3d960b2f5b, entries=7, sequenceid=42, filesize=12.2 K 2024-11-14T06:45:41,832 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for d9de73af852b586b02bc22d70a1925cf in 42ms, sequenceid=42, compaction requested=false 2024-11-14T06:45:41,832 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d9de73af852b586b02bc22d70a1925cf: 2024-11-14T06:45:41,832 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-14T06:45:41,832 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:45:41,832 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/1b26715228e944939cb782f5dc4e3d3d because midkey is the same as first or last row 2024-11-14T06:45:43,118 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-14T06:45:47,662 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d9de73af852b586b02bc22d70a1925cf, had cached 0 bytes from a total of 40219 2024-11-14T06:45:49,815 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T06:45:49,816 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T06:45:49,817 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 
2024-11-14T06:45:49,827 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:45:49,828 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:45:49,828 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-14T06:45:49,828 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T06:45:49,828 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1718951232, stopped=false 2024-11-14T06:45:49,828 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=20680646cf8a,41891,1731566654142 2024-11-14T06:45:49,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T06:45:49,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:49,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36049-0x1003cf9ab910001, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T06:45:49,830 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T06:45:49,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36049-0x1003cf9ab910001, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:49,830 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-14T06:45:49,830 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T06:45:49,830 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Set watcher on 
znode that does not yet exist, /hbase/running 2024-11-14T06:45:49,830 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:45:49,830 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36049-0x1003cf9ab910001, quorum=127.0.0.1:61515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:45:49,831 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '20680646cf8a,36049,1731566654813' ***** 2024-11-14T06:45:49,831 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T06:45:49,831 INFO [RS:0;20680646cf8a:36049 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T06:45:49,831 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T06:45:49,831 INFO [RS:0;20680646cf8a:36049 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T06:45:49,831 INFO [RS:0;20680646cf8a:36049 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T06:45:49,832 INFO [RS:0;20680646cf8a:36049 {}] regionserver.HRegionServer(3091): Received CLOSE for d9de73af852b586b02bc22d70a1925cf 2024-11-14T06:45:49,832 INFO [RS:0;20680646cf8a:36049 {}] regionserver.HRegionServer(959): stopping server 20680646cf8a,36049,1731566654813 2024-11-14T06:45:49,832 INFO [RS:0;20680646cf8a:36049 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T06:45:49,832 INFO [RS:0;20680646cf8a:36049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;20680646cf8a:36049. 2024-11-14T06:45:49,832 DEBUG [RS:0;20680646cf8a:36049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T06:45:49,832 DEBUG [RS:0;20680646cf8a:36049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:45:49,832 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d9de73af852b586b02bc22d70a1925cf, disabling compactions & flushes 2024-11-14T06:45:49,832 INFO [RS:0;20680646cf8a:36049 {}] 
regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T06:45:49,832 INFO [RS:0;20680646cf8a:36049 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T06:45:49,832 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf. 2024-11-14T06:45:49,832 INFO [RS:0;20680646cf8a:36049 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-14T06:45:49,833 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf. 2024-11-14T06:45:49,833 INFO [RS:0;20680646cf8a:36049 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T06:45:49,833 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf. after waiting 0 ms 2024-11-14T06:45:49,833 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf. 2024-11-14T06:45:49,833 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing d9de73af852b586b02bc22d70a1925cf 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-14T06:45:49,833 INFO [RS:0;20680646cf8a:36049 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-14T06:45:49,833 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T06:45:49,833 DEBUG [RS:0;20680646cf8a:36049 {}] regionserver.HRegionServer(1325): Online Regions={d9de73af852b586b02bc22d70a1925cf=TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf., 1588230740=hbase:meta,,1.1588230740} 2024-11-14T06:45:49,833 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T06:45:49,833 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T06:45:49,833 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T06:45:49,833 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T06:45:49,833 DEBUG [RS:0;20680646cf8a:36049 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, d9de73af852b586b02bc22d70a1925cf 2024-11-14T06:45:49,833 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-14T06:45:49,838 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/.tmp/info/dd757c86cd3b46b5982ef6f87eaf890f is 1080, key is row0029/info:/1731566743794/Put/seqid=0 2024-11-14T06:45:49,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741850_1026 (size=8193) 2024-11-14T06:45:49,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741850_1026 (size=8193) 2024-11-14T06:45:49,846 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/.tmp/info/dd757c86cd3b46b5982ef6f87eaf890f 2024-11-14T06:45:49,855 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/.tmp/info/dd757c86cd3b46b5982ef6f87eaf890f as hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/dd757c86cd3b46b5982ef6f87eaf890f 2024-11-14T06:45:49,862 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/hbase/meta/1588230740/.tmp/info/8ff0835e12954cbc8672950dd91ab848 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf./info:regioninfo/1731566657694/Put/seqid=0 2024-11-14T06:45:49,864 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/dd757c86cd3b46b5982ef6f87eaf890f, entries=3, sequenceid=48, filesize=8.0 K 2024-11-14T06:45:49,866 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for d9de73af852b586b02bc22d70a1925cf in 33ms, sequenceid=48, compaction requested=true 2024-11-14T06:45:49,867 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/72645344a9794bf9ac85e2b59d87a0f5, hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/e4e051d32b274c839b592e2d510ffa38, 
hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/7c73ec10de9948309b2ddada6b97aa5c] to archive 2024-11-14T06:45:49,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741851_1027 (size=7016) 2024-11-14T06:45:49,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741851_1027 (size=7016) 2024-11-14T06:45:49,870 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/hbase/meta/1588230740/.tmp/info/8ff0835e12954cbc8672950dd91ab848 2024-11-14T06:45:49,870 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-14T06:45:49,873 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/72645344a9794bf9ac85e2b59d87a0f5 to hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/archive/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/72645344a9794bf9ac85e2b59d87a0f5 2024-11-14T06:45:49,875 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/e4e051d32b274c839b592e2d510ffa38 to hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/archive/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/e4e051d32b274c839b592e2d510ffa38 2024-11-14T06:45:49,878 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/7c73ec10de9948309b2ddada6b97aa5c to hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/archive/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/info/7c73ec10de9948309b2ddada6b97aa5c 2024-11-14T06:45:49,894 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/hbase/meta/1588230740/.tmp/ns/3d122e483bd943ba8b5d365bd99cac4b is 43, key is default/ns:d/1731566656916/Put/seqid=0 2024-11-14T06:45:49,893 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to 
Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=20680646cf8a:41891 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-14T06:45:49,899 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [72645344a9794bf9ac85e2b59d87a0f5=12509, e4e051d32b274c839b592e2d510ffa38=12509, 7c73ec10de9948309b2ddada6b97aa5c=12509] 2024-11-14T06:45:49,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741852_1028 (size=5153) 2024-11-14T06:45:49,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741852_1028 (size=5153) 2024-11-14T06:45:49,901 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/hbase/meta/1588230740/.tmp/ns/3d122e483bd943ba8b5d365bd99cac4b 2024-11-14T06:45:49,905 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/default/TestLogRolling-testSlowSyncLogRolling/d9de73af852b586b02bc22d70a1925cf/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-14T06:45:49,908 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf. 2024-11-14T06:45:49,908 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d9de73af852b586b02bc22d70a1925cf: Waiting for close lock at 1731566749832Running coprocessor pre-close hooks at 1731566749832Disabling compacts and flushes for region at 1731566749832Disabling writes for close at 1731566749833 (+1 ms)Obtaining lock to block concurrent updates at 1731566749833Preparing flush snapshotting stores in d9de73af852b586b02bc22d70a1925cf at 1731566749833Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731566749833Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf. 
at 1731566749834 (+1 ms)Flushing d9de73af852b586b02bc22d70a1925cf/info: creating writer at 1731566749834Flushing d9de73af852b586b02bc22d70a1925cf/info: appending metadata at 1731566749838 (+4 ms)Flushing d9de73af852b586b02bc22d70a1925cf/info: closing flushed file at 1731566749838Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@74121f: reopening flushed file at 1731566749854 (+16 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for d9de73af852b586b02bc22d70a1925cf in 33ms, sequenceid=48, compaction requested=true at 1731566749866 (+12 ms)Writing region close event to WAL at 1731566749901 (+35 ms)Running coprocessor post-close hooks at 1731566749906 (+5 ms)Closed at 1731566749908 (+2 ms) 2024-11-14T06:45:49,909 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731566657145.d9de73af852b586b02bc22d70a1925cf. 2024-11-14T06:45:49,924 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/hbase/meta/1588230740/.tmp/table/3b72b495d9e241269483f608d599b715 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731566657720/Put/seqid=0 2024-11-14T06:45:49,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741853_1029 (size=5396) 2024-11-14T06:45:49,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741853_1029 (size=5396) 2024-11-14T06:45:49,931 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/hbase/meta/1588230740/.tmp/table/3b72b495d9e241269483f608d599b715 2024-11-14T06:45:49,939 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/hbase/meta/1588230740/.tmp/info/8ff0835e12954cbc8672950dd91ab848 as hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/hbase/meta/1588230740/info/8ff0835e12954cbc8672950dd91ab848 2024-11-14T06:45:49,948 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/hbase/meta/1588230740/info/8ff0835e12954cbc8672950dd91ab848, entries=10, sequenceid=11, filesize=6.9 K 2024-11-14T06:45:49,949 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/hbase/meta/1588230740/.tmp/ns/3d122e483bd943ba8b5d365bd99cac4b as hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/hbase/meta/1588230740/ns/3d122e483bd943ba8b5d365bd99cac4b 2024-11-14T06:45:49,958 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/hbase/meta/1588230740/ns/3d122e483bd943ba8b5d365bd99cac4b, entries=2, sequenceid=11, filesize=5.0 K 2024-11-14T06:45:49,959 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/hbase/meta/1588230740/.tmp/table/3b72b495d9e241269483f608d599b715 as hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/hbase/meta/1588230740/table/3b72b495d9e241269483f608d599b715 2024-11-14T06:45:49,967 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/hbase/meta/1588230740/table/3b72b495d9e241269483f608d599b715, entries=2, sequenceid=11, filesize=5.3 K 2024-11-14T06:45:49,969 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 135ms, sequenceid=11, compaction requested=false 2024-11-14T06:45:49,974 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-14T06:45:49,975 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T06:45:49,975 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T06:45:49,976 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731566749833Running coprocessor pre-close hooks at 1731566749833Disabling compacts and flushes for region at 1731566749833Disabling writes for close at 1731566749833Obtaining lock to block concurrent updates at 1731566749833Preparing flush snapshotting stores in 1588230740 at 1731566749833Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731566749834 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731566749835 (+1 ms)Flushing 1588230740/info: creating writer at 1731566749835Flushing 1588230740/info: appending metadata at 1731566749861 (+26 ms)Flushing 1588230740/info: closing flushed file at 1731566749861Flushing 1588230740/ns: creating writer at 1731566749878 (+17 ms)Flushing 1588230740/ns: appending metadata at 1731566749893 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731566749893Flushing 1588230740/table: creating writer at 1731566749909 (+16 ms)Flushing 1588230740/table: appending metadata at 1731566749923 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731566749923Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@91d7d8b: reopening flushed file at 1731566749938 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@bf77160: reopening flushed file at 1731566749948 (+10 
ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19a40efa: reopening flushed file at 1731566749958 (+10 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 135ms, sequenceid=11, compaction requested=false at 1731566749969 (+11 ms)Writing region close event to WAL at 1731566749970 (+1 ms)Running coprocessor post-close hooks at 1731566749975 (+5 ms)Closed at 1731566749975 2024-11-14T06:45:49,976 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T06:45:50,034 INFO [RS:0;20680646cf8a:36049 {}] regionserver.HRegionServer(976): stopping server 20680646cf8a,36049,1731566654813; all regions closed. 2024-11-14T06:45:50,035 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:50,035 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:50,035 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:50,035 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:50,035 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:50,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741834_1010 (size=3066) 2024-11-14T06:45:50,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741834_1010 (size=3066) 2024-11-14T06:45:50,042 DEBUG [RS:0;20680646cf8a:36049 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/oldWALs 2024-11-14T06:45:50,042 INFO [RS:0;20680646cf8a:36049 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 20680646cf8a%2C36049%2C1731566654813.meta:.meta(num 1731566656742) 2024-11-14T06:45:50,042 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:50,042 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:50,043 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:50,043 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:50,043 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:50,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741847_1023 (size=12695) 2024-11-14T06:45:50,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741847_1023 (size=12695) 2024-11-14T06:45:50,049 DEBUG [RS:0;20680646cf8a:36049 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/oldWALs 2024-11-14T06:45:50,049 INFO [RS:0;20680646cf8a:36049 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 20680646cf8a%2C36049%2C1731566654813:(num 1731566729745) 2024-11-14T06:45:50,049 DEBUG [RS:0;20680646cf8a:36049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:45:50,049 INFO [RS:0;20680646cf8a:36049 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T06:45:50,049 INFO [RS:0;20680646cf8a:36049 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T06:45:50,049 INFO [RS:0;20680646cf8a:36049 {}] hbase.ChoreService(370): Chore service for: regionserver/20680646cf8a:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore 
name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-14T06:45:50,049 INFO [RS:0;20680646cf8a:36049 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T06:45:50,049 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T06:45:50,050 INFO [RS:0;20680646cf8a:36049 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36049 2024-11-14T06:45:50,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T06:45:50,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36049-0x1003cf9ab910001, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/20680646cf8a,36049,1731566654813 2024-11-14T06:45:50,053 INFO [RS:0;20680646cf8a:36049 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T06:45:50,054 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [20680646cf8a,36049,1731566654813] 2024-11-14T06:45:50,055 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/20680646cf8a,36049,1731566654813 already deleted, retry=false 2024-11-14T06:45:50,055 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 20680646cf8a,36049,1731566654813 expired; onlineServers=0 2024-11-14T06:45:50,055 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '20680646cf8a,41891,1731566654142' ***** 2024-11-14T06:45:50,055 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T06:45:50,056 INFO [M:0;20680646cf8a:41891 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T06:45:50,056 INFO [M:0;20680646cf8a:41891 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T06:45:50,056 DEBUG [M:0;20680646cf8a:41891 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T06:45:50,056 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-14T06:45:50,056 DEBUG [M:0;20680646cf8a:41891 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T06:45:50,056 DEBUG [master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.large.0-1731566655952 {}] cleaner.HFileCleaner(306): Exit Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.large.0-1731566655952,5,FailOnTimeoutGroup] 2024-11-14T06:45:50,056 DEBUG [master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.small.0-1731566655965 {}] cleaner.HFileCleaner(306): Exit Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.small.0-1731566655965,5,FailOnTimeoutGroup] 2024-11-14T06:45:50,056 INFO [M:0;20680646cf8a:41891 {}] hbase.ChoreService(370): Chore service for: master/20680646cf8a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T06:45:50,056 INFO [M:0;20680646cf8a:41891 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T06:45:50,057 DEBUG [M:0;20680646cf8a:41891 {}] master.HMaster(1795): Stopping service threads 2024-11-14T06:45:50,057 INFO [M:0;20680646cf8a:41891 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T06:45:50,057 INFO [M:0;20680646cf8a:41891 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T06:45:50,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T06:45:50,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:50,057 INFO [M:0;20680646cf8a:41891 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T06:45:50,057 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-14T06:45:50,058 DEBUG [M:0;20680646cf8a:41891 {}] zookeeper.ZKUtil(347): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T06:45:50,058 WARN [M:0;20680646cf8a:41891 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T06:45:50,059 INFO [M:0;20680646cf8a:41891 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/.lastflushedseqids 2024-11-14T06:45:50,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741854_1030 (size=130) 2024-11-14T06:45:50,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741854_1030 (size=130) 2024-11-14T06:45:50,070 INFO [M:0;20680646cf8a:41891 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T06:45:50,071 INFO [M:0;20680646cf8a:41891 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T06:45:50,071 DEBUG [M:0;20680646cf8a:41891 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T06:45:50,071 INFO [M:0;20680646cf8a:41891 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:45:50,071 DEBUG [M:0;20680646cf8a:41891 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:45:50,071 DEBUG [M:0;20680646cf8a:41891 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T06:45:50,071 DEBUG [M:0;20680646cf8a:41891 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T06:45:50,071 INFO [M:0;20680646cf8a:41891 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.04 KB heapSize=29.21 KB 2024-11-14T06:45:50,090 DEBUG [M:0;20680646cf8a:41891 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/640831f8450f475f8953148a5c8b9a9e is 82, key is hbase:meta,,1/info:regioninfo/1731566656833/Put/seqid=0 2024-11-14T06:45:50,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741855_1031 (size=5672) 2024-11-14T06:45:50,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741855_1031 (size=5672) 2024-11-14T06:45:50,097 INFO [M:0;20680646cf8a:41891 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/640831f8450f475f8953148a5c8b9a9e 2024-11-14T06:45:50,112 INFO [regionserver/20680646cf8a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T06:45:50,119 DEBUG [M:0;20680646cf8a:41891 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4260780f77744d09816982d8f7453f94 is 767, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731566657732/Put/seqid=0 2024-11-14T06:45:50,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741856_1032 (size=6248) 2024-11-14T06:45:50,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741856_1032 (size=6248) 2024-11-14T06:45:50,127 INFO [M:0;20680646cf8a:41891 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.43 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4260780f77744d09816982d8f7453f94 2024-11-14T06:45:50,135 INFO [M:0;20680646cf8a:41891 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 4260780f77744d09816982d8f7453f94 2024-11-14T06:45:50,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36049-0x1003cf9ab910001, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:45:50,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36049-0x1003cf9ab910001, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:45:50,155 INFO [RS:0;20680646cf8a:36049 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T06:45:50,155 INFO [RS:0;20680646cf8a:36049 {}] regionserver.HRegionServer(1031): Exiting; stopping=20680646cf8a,36049,1731566654813; zookeeper connection closed. 
2024-11-14T06:45:50,155 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@28be8cc5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@28be8cc5 2024-11-14T06:45:50,156 DEBUG [M:0;20680646cf8a:41891 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/637e5403d859420399963bec665a4523 is 69, key is 20680646cf8a,36049,1731566654813/rs:state/1731566655996/Put/seqid=0 2024-11-14T06:45:50,156 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T06:45:50,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741857_1033 (size=5156) 2024-11-14T06:45:50,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741857_1033 (size=5156) 2024-11-14T06:45:50,164 INFO [M:0;20680646cf8a:41891 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/637e5403d859420399963bec665a4523 2024-11-14T06:45:50,192 DEBUG [M:0;20680646cf8a:41891 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5cf30d9560a24436aa66192bf41a9007 is 52, key is load_balancer_on/state:d/1731566657112/Put/seqid=0 2024-11-14T06:45:50,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741858_1034 (size=5056) 2024-11-14T06:45:50,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741858_1034 (size=5056) 2024-11-14T06:45:50,199 INFO [M:0;20680646cf8a:41891 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5cf30d9560a24436aa66192bf41a9007 2024-11-14T06:45:50,207 DEBUG [M:0;20680646cf8a:41891 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/640831f8450f475f8953148a5c8b9a9e as hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/640831f8450f475f8953148a5c8b9a9e 2024-11-14T06:45:50,213 INFO [M:0;20680646cf8a:41891 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/640831f8450f475f8953148a5c8b9a9e, entries=8, sequenceid=59, filesize=5.5 K 2024-11-14T06:45:50,214 DEBUG [M:0;20680646cf8a:41891 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4260780f77744d09816982d8f7453f94 as hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4260780f77744d09816982d8f7453f94 2024-11-14T06:45:50,221 INFO [M:0;20680646cf8a:41891 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 4260780f77744d09816982d8f7453f94 2024-11-14T06:45:50,221 INFO [M:0;20680646cf8a:41891 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4260780f77744d09816982d8f7453f94, entries=6, sequenceid=59, filesize=6.1 K 2024-11-14T06:45:50,222 DEBUG [M:0;20680646cf8a:41891 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/637e5403d859420399963bec665a4523 as hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/637e5403d859420399963bec665a4523 2024-11-14T06:45:50,228 INFO [M:0;20680646cf8a:41891 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/637e5403d859420399963bec665a4523, entries=1, sequenceid=59, filesize=5.0 K 2024-11-14T06:45:50,229 DEBUG [M:0;20680646cf8a:41891 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5cf30d9560a24436aa66192bf41a9007 as hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5cf30d9560a24436aa66192bf41a9007 2024-11-14T06:45:50,235 INFO [M:0;20680646cf8a:41891 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5cf30d9560a24436aa66192bf41a9007, entries=1, sequenceid=59, filesize=4.9 K 2024-11-14T06:45:50,237 INFO [M:0;20680646cf8a:41891 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 166ms, sequenceid=59, compaction requested=false 2024-11-14T06:45:50,238 INFO [M:0;20680646cf8a:41891 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T06:45:50,238 DEBUG [M:0;20680646cf8a:41891 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731566750071Disabling compacts and flushes for region at 1731566750071Disabling writes for close at 1731566750071Obtaining lock to block concurrent updates at 1731566750071Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731566750071Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23588, getHeapSize=29848, getOffHeapSize=0, getCellsCount=70 at 1731566750072 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731566750073 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731566750073Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731566750089 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731566750089Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731566750103 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731566750119 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731566750119Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731566750135 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731566750155 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731566750155Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731566750171 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731566750191 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731566750192 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@e5a7496: reopening flushed file at 1731566750205 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5838aa06: reopening flushed file at 1731566750213 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@600a4ccc: reopening flushed file at 1731566750221 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2ff44128: reopening flushed file at 1731566750228 (+7 ms)Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 166ms, sequenceid=59, compaction requested=false at 1731566750237 (+9 ms)Writing region close event to WAL at 1731566750238 (+1 ms)Closed at 1731566750238 2024-11-14T06:45:50,239 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:50,239 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:50,239 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:50,239 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:50,240 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:50,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741830_1006 (size=27985) 2024-11-14T06:45:50,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43335 is added to blk_1073741830_1006 (size=27985) 2024-11-14T06:45:50,242 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-14T06:45:50,243 INFO [M:0;20680646cf8a:41891 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-14T06:45:50,243 INFO [M:0;20680646cf8a:41891 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41891 2024-11-14T06:45:50,243 INFO [M:0;20680646cf8a:41891 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T06:45:50,344 INFO [M:0;20680646cf8a:41891 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T06:45:50,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:45:50,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41891-0x1003cf9ab910000, quorum=127.0.0.1:61515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:45:50,352 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:45:50,356 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:45:50,356 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:45:50,357 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:45:50,357 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/hadoop.log.dir/,STOPPED} 2024-11-14T06:45:50,359 WARN [BP-828308674-172.17.0.2-1731566650918 heartbeating to localhost/127.0.0.1:33705 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T06:45:50,359 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T06:45:50,359 WARN [BP-828308674-172.17.0.2-1731566650918 heartbeating to localhost/127.0.0.1:33705 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-828308674-172.17.0.2-1731566650918 (Datanode Uuid dc5a9485-1db3-430d-bb85-5d3a83e8c4fe) service to localhost/127.0.0.1:33705 2024-11-14T06:45:50,359 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T06:45:50,361 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/cluster_2e073713-b5cb-4197-b6cd-5ecb23063947/data/data3/current/BP-828308674-172.17.0.2-1731566650918 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:45:50,361 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/cluster_2e073713-b5cb-4197-b6cd-5ecb23063947/data/data4/current/BP-828308674-172.17.0.2-1731566650918 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:45:50,361 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T06:45:50,366 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:45:50,367 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:45:50,367 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:45:50,367 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:45:50,367 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/hadoop.log.dir/,STOPPED} 2024-11-14T06:45:50,369 WARN [BP-828308674-172.17.0.2-1731566650918 heartbeating to localhost/127.0.0.1:33705 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T06:45:50,369 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T06:45:50,369 WARN [BP-828308674-172.17.0.2-1731566650918 heartbeating to localhost/127.0.0.1:33705 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-828308674-172.17.0.2-1731566650918 (Datanode Uuid e128d9dc-a6c3-4cbc-8aa5-267311cfa7e1) service to localhost/127.0.0.1:33705 2024-11-14T06:45:50,369 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T06:45:50,369 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/cluster_2e073713-b5cb-4197-b6cd-5ecb23063947/data/data1/current/BP-828308674-172.17.0.2-1731566650918 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:45:50,370 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/cluster_2e073713-b5cb-4197-b6cd-5ecb23063947/data/data2/current/BP-828308674-172.17.0.2-1731566650918 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:45:50,370 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T06:45:50,378 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T06:45:50,379 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:45:50,379 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:45:50,379 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:45:50,379 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/hadoop.log.dir/,STOPPED} 2024-11-14T06:45:50,388 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T06:45:50,421 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T06:45:50,430 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=78 (was 12) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:33705 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/20680646cf8a:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33705 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33705 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33705 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33705 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33705 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: master/20680646cf8a:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33705 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33705 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: regionserver/20680646cf8a:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@3e12dea0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=133 (was 265), ProcessCount=11 (was 11), AvailableMemoryMB=1220 (was 2363) 2024-11-14T06:45:50,436 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=79, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=133, ProcessCount=11, AvailableMemoryMB=1219 2024-11-14T06:45:50,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T06:45:50,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/hadoop.log.dir so I do NOT create it in target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c 2024-11-14T06:45:50,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/da5135e7-1c3c-4121-547f-810b40c87e5f/hadoop.tmp.dir so I do NOT create it in target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c 2024-11-14T06:45:50,436 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/cluster_443e9163-05dd-5154-03ea-6139b5073b4b, deleteOnExit=true 2024-11-14T06:45:50,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T06:45:50,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/test.cache.data in system properties and 
HBase conf 2024-11-14T06:45:50,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T06:45:50,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/hadoop.log.dir in system properties and HBase conf 2024-11-14T06:45:50,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T06:45:50,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T06:45:50,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T06:45:50,437 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-14T06:45:50,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T06:45:50,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T06:45:50,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T06:45:50,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T06:45:50,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T06:45:50,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T06:45:50,438 INFO [Time-limited test 
{}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T06:45:50,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T06:45:50,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T06:45:50,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/nfs.dump.dir in system properties and HBase conf 2024-11-14T06:45:50,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/java.io.tmpdir in system properties and HBase conf 2024-11-14T06:45:50,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T06:45:50,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T06:45:50,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T06:45:50,453 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T06:45:50,503 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:45:50,509 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:45:50,510 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:45:50,511 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:45:50,511 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T06:45:50,511 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:45:50,512 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3bd9c5b4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:45:50,512 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a3c3ceb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:45:50,609 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@52b0c086{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/java.io.tmpdir/jetty-localhost-37559-hadoop-hdfs-3_4_1-tests_jar-_-any-17697716822179225980/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T06:45:50,610 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4d9de743{HTTP/1.1, (http/1.1)}{localhost:37559} 2024-11-14T06:45:50,610 INFO [Time-limited test {}] server.Server(415): Started @102206ms 2024-11-14T06:45:50,622 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T06:45:50,677 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:45:50,681 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:45:50,681 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:45:50,682 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:45:50,682 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T06:45:50,682 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@55f7876e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:45:50,682 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@607b9bc6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:45:50,777 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@30a1c2a3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/java.io.tmpdir/jetty-localhost-41721-hadoop-hdfs-3_4_1-tests_jar-_-any-9249175110132292507/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:45:50,777 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4620cd8a{HTTP/1.1, (http/1.1)}{localhost:41721} 2024-11-14T06:45:50,777 INFO [Time-limited test {}] server.Server(415): Started @102374ms 2024-11-14T06:45:50,779 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T06:45:50,811 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:45:50,815 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:45:50,818 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:45:50,818 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:45:50,818 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T06:45:50,818 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@463a48f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:45:50,819 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d944f53{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:45:50,844 WARN [Thread-439 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/cluster_443e9163-05dd-5154-03ea-6139b5073b4b/data/data2/current/BP-2101863453-172.17.0.2-1731566750464/current, will proceed with Du for space computation calculation, 2024-11-14T06:45:50,844 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/cluster_443e9163-05dd-5154-03ea-6139b5073b4b/data/data1/current/BP-2101863453-172.17.0.2-1731566750464/current, will proceed with Du for space computation calculation, 2024-11-14T06:45:50,858 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T06:45:50,861 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe7e0cc2c0c072d1c with lease ID 0x9177ff8b71d47b7a: Processing first storage report for DS-7870b923-d228-4343-8538-8937c53fdf0b from datanode DatanodeRegistration(127.0.0.1:32789, datanodeUuid=125bfb23-bbac-4b7a-a181-69bface61140, infoPort=37879, infoSecurePort=0, ipcPort=36199, storageInfo=lv=-57;cid=testClusterID;nsid=1415333346;c=1731566750464) 2024-11-14T06:45:50,861 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe7e0cc2c0c072d1c with lease ID 0x9177ff8b71d47b7a: from storage DS-7870b923-d228-4343-8538-8937c53fdf0b node DatanodeRegistration(127.0.0.1:32789, datanodeUuid=125bfb23-bbac-4b7a-a181-69bface61140, infoPort=37879, infoSecurePort=0, ipcPort=36199, storageInfo=lv=-57;cid=testClusterID;nsid=1415333346;c=1731566750464), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:45:50,862 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe7e0cc2c0c072d1c with lease ID 0x9177ff8b71d47b7a: Processing first storage report for DS-09e8bca2-5197-47b4-be70-3c7eb0ba974d from datanode DatanodeRegistration(127.0.0.1:32789, datanodeUuid=125bfb23-bbac-4b7a-a181-69bface61140, infoPort=37879, infoSecurePort=0, ipcPort=36199, storageInfo=lv=-57;cid=testClusterID;nsid=1415333346;c=1731566750464) 2024-11-14T06:45:50,862 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe7e0cc2c0c072d1c with lease ID 0x9177ff8b71d47b7a: from storage DS-09e8bca2-5197-47b4-be70-3c7eb0ba974d node DatanodeRegistration(127.0.0.1:32789, datanodeUuid=125bfb23-bbac-4b7a-a181-69bface61140, infoPort=37879, infoSecurePort=0, ipcPort=36199, storageInfo=lv=-57;cid=testClusterID;nsid=1415333346;c=1731566750464), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:45:50,917 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@c708570{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/java.io.tmpdir/jetty-localhost-35743-hadoop-hdfs-3_4_1-tests_jar-_-any-17754589035718003103/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:45:50,918 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7ab06e68{HTTP/1.1, (http/1.1)}{localhost:35743} 2024-11-14T06:45:50,918 INFO [Time-limited test {}] server.Server(415): Started @102514ms 2024-11-14T06:45:50,919 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
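The records above show the harness bringing up an embedded HDFS cluster: a NameNode web app on Jetty plus two DataNodes registering their storages into block pool BP-2101863453-172.17.0.2-1731566750464. As a point of reference only, this is roughly how such a cluster is started with Hadoop's MiniDFSCluster test utility; the builder options below are illustrative assumptions, not the settings used by this run.

// Illustrative sketch only: approximates the embedded HDFS setup logged above.
// Requires the hadoop-hdfs test artifact on the classpath; option values are assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Two DataNodes, matching the two block-pool slices reported in the log.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)
        .build();
    cluster.waitActive();                       // wait for DataNodes to register and report blocks
    FileSystem fs = cluster.getFileSystem();    // DistributedFileSystem backed by the mini cluster
    fs.mkdirs(new Path("/user/jenkins/test-data"));
    System.out.println("NameNode RPC port: " + cluster.getNameNodePort());
    cluster.shutdown();
  }
}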
2024-11-14T06:45:50,977 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/cluster_443e9163-05dd-5154-03ea-6139b5073b4b/data/data4/current/BP-2101863453-172.17.0.2-1731566750464/current, will proceed with Du for space computation calculation, 2024-11-14T06:45:50,977 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/cluster_443e9163-05dd-5154-03ea-6139b5073b4b/data/data3/current/BP-2101863453-172.17.0.2-1731566750464/current, will proceed with Du for space computation calculation, 2024-11-14T06:45:50,992 WARN [Thread-453 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T06:45:50,995 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe49ea6fada32ed5c with lease ID 0x9177ff8b71d47b7b: Processing first storage report for DS-2e921809-3f28-4d9c-b2bd-1a04e902fb9e from datanode DatanodeRegistration(127.0.0.1:36953, datanodeUuid=b10daefd-dc88-4998-b527-19d87582c32f, infoPort=39031, infoSecurePort=0, ipcPort=34067, storageInfo=lv=-57;cid=testClusterID;nsid=1415333346;c=1731566750464) 2024-11-14T06:45:50,995 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe49ea6fada32ed5c with lease ID 0x9177ff8b71d47b7b: from storage DS-2e921809-3f28-4d9c-b2bd-1a04e902fb9e node DatanodeRegistration(127.0.0.1:36953, datanodeUuid=b10daefd-dc88-4998-b527-19d87582c32f, infoPort=39031, infoSecurePort=0, ipcPort=34067, storageInfo=lv=-57;cid=testClusterID;nsid=1415333346;c=1731566750464), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T06:45:50,995 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe49ea6fada32ed5c with lease ID 0x9177ff8b71d47b7b: Processing first storage report for DS-db09cd79-e71f-4e93-8d04-7f08a5a9f898 from datanode DatanodeRegistration(127.0.0.1:36953, datanodeUuid=b10daefd-dc88-4998-b527-19d87582c32f, infoPort=39031, infoSecurePort=0, ipcPort=34067, storageInfo=lv=-57;cid=testClusterID;nsid=1415333346;c=1731566750464) 2024-11-14T06:45:50,995 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe49ea6fada32ed5c with lease ID 0x9177ff8b71d47b7b: from storage DS-db09cd79-e71f-4e93-8d04-7f08a5a9f898 node DatanodeRegistration(127.0.0.1:36953, datanodeUuid=b10daefd-dc88-4998-b527-19d87582c32f, infoPort=39031, infoSecurePort=0, ipcPort=34067, storageInfo=lv=-57;cid=testClusterID;nsid=1415333346;c=1731566750464), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:45:51,047 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c 2024-11-14T06:45:51,051 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/cluster_443e9163-05dd-5154-03ea-6139b5073b4b/zookeeper_0, clientPort=57823, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/cluster_443e9163-05dd-5154-03ea-6139b5073b4b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/cluster_443e9163-05dd-5154-03ea-6139b5073b4b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T06:45:51,052 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57823 2024-11-14T06:45:51,052 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:45:51,054 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:45:51,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741825_1001 (size=7) 2024-11-14T06:45:51,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32789 is added to blk_1073741825_1001 (size=7) 2024-11-14T06:45:51,066 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a with version=8 2024-11-14T06:45:51,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/hbase-staging 2024-11-14T06:45:51,068 INFO [Time-limited test {}] client.ConnectionUtils(128): master/20680646cf8a:0 server-side Connection retries=45 2024-11-14T06:45:51,068 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:45:51,069 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T06:45:51,069 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T06:45:51,069 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:45:51,069 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T06:45:51,069 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T06:45:51,069 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T06:45:51,070 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45281 2024-11-14T06:45:51,072 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45281 connecting to ZooKeeper ensemble=127.0.0.1:57823 2024-11-14T06:45:51,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:452810x0, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T06:45:51,076 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45281-0x1003cfb29b80000 connected 2024-11-14T06:45:51,089 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:45:51,090 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:45:51,092 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:45:51,093 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a, hbase.cluster.distributed=false 2024-11-14T06:45:51,094 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T06:45:51,095 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45281 2024-11-14T06:45:51,095 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45281 2024-11-14T06:45:51,095 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45281 2024-11-14T06:45:51,096 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45281 2024-11-14T06:45:51,096 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45281 2024-11-14T06:45:51,115 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/20680646cf8a:0 server-side Connection retries=45 2024-11-14T06:45:51,115 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:45:51,115 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T06:45:51,115 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T06:45:51,115 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:45:51,115 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T06:45:51,115 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T06:45:51,115 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T06:45:51,116 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35229 2024-11-14T06:45:51,117 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35229 connecting to ZooKeeper ensemble=127.0.0.1:57823 2024-11-14T06:45:51,118 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:45:51,119 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:45:51,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:352290x0, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T06:45:51,124 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:352290x0, quorum=127.0.0.1:57823, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:45:51,124 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35229-0x1003cfb29b80001 connected 2024-11-14T06:45:51,124 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T06:45:51,125 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T06:45:51,125 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35229-0x1003cfb29b80001, quorum=127.0.0.1:57823, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T06:45:51,126 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35229-0x1003cfb29b80001, quorum=127.0.0.1:57823, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T06:45:51,128 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35229 2024-11-14T06:45:51,129 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35229 2024-11-14T06:45:51,131 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35229 2024-11-14T06:45:51,131 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35229 2024-11-14T06:45:51,133 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35229 2024-11-14T06:45:51,144 
DEBUG [M:0;20680646cf8a:45281 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;20680646cf8a:45281 2024-11-14T06:45:51,145 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/20680646cf8a,45281,1731566751068 2024-11-14T06:45:51,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T06:45:51,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35229-0x1003cfb29b80001, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T06:45:51,146 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/20680646cf8a,45281,1731566751068 2024-11-14T06:45:51,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35229-0x1003cfb29b80001, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T06:45:51,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35229-0x1003cfb29b80001, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:51,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:51,148 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T06:45:51,148 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/20680646cf8a,45281,1731566751068 from backup master directory 2024-11-14T06:45:51,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/20680646cf8a,45281,1731566751068 2024-11-14T06:45:51,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35229-0x1003cfb29b80001, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T06:45:51,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T06:45:51,149 WARN [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-14T06:45:51,149 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=20680646cf8a,45281,1731566751068 2024-11-14T06:45:51,154 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/hbase.id] with ID: 04311779-154e-4efd-a7ce-a2ce8a8f448b 2024-11-14T06:45:51,154 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/.tmp/hbase.id 2024-11-14T06:45:51,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741826_1002 (size=42) 2024-11-14T06:45:51,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32789 is added to blk_1073741826_1002 (size=42) 2024-11-14T06:45:51,162 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/.tmp/hbase.id]:[hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/hbase.id] 2024-11-14T06:45:51,175 INFO [master/20680646cf8a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:45:51,175 INFO [master/20680646cf8a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T06:45:51,177 INFO [master/20680646cf8a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
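From here the log is the HBase side of the same harness: HBaseTestingUtil has set hbase.rootdir, a MiniZooKeeperCluster is serving client port 57823, and a master plus one regionserver are registering over that ensemble. As a hedged sketch only, a test typically drives this through the public HBase testing utility; the class and method names below follow the widely documented HBaseTestingUtility API and may differ slightly from the HBaseTestingUtil (HBase 3.x) class actually named in this log.

// Hedged sketch of the kind of JUnit-style setup that produces the records above.
// Not the test source for this run; API names are from HBaseTestingUtility.
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MiniHBaseSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // Starts mini DFS, mini ZooKeeper and an HBase master + regionserver,
    // i.e. everything this section of the log is recording.
    util.startMiniCluster(1);
    try {
      Table t = util.createTable(TableName.valueOf("demo"), Bytes.toBytes("cf"));
      System.out.println("created " + t.getName());
    } finally {
      util.shutdownMiniCluster();
    }
  }
}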
2024-11-14T06:45:51,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:51,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35229-0x1003cfb29b80001, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:51,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741827_1003 (size=196) 2024-11-14T06:45:51,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32789 is added to blk_1073741827_1003 (size=196) 2024-11-14T06:45:51,186 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T06:45:51,187 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T06:45:51,188 INFO [master/20680646cf8a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T06:45:51,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741828_1004 (size=1189) 2024-11-14T06:45:51,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32789 is added to blk_1073741828_1004 (size=1189) 2024-11-14T06:45:51,197 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/data/master/store 2024-11-14T06:45:51,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741829_1005 (size=34) 2024-11-14T06:45:51,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32789 is added to blk_1073741829_1005 (size=34) 2024-11-14T06:45:51,207 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:45:51,207 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T06:45:51,207 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:45:51,207 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:45:51,207 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T06:45:51,207 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:45:51,207 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
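The long descriptor dumps above (families info, proc, rs and state with their VERSIONS, BLOOMFILTER, BLOCKSIZE and IN_MEMORY attributes) are the string form of an HBase table descriptor for the master-local 'master:store' region. For orientation only, the same attributes are expressed programmatically with the public descriptor builders roughly as follows; the snippet reproduces the 'info' family settings shown in the log as an illustration, not the actual master-store code.

// Illustrative only: expresses the 'info' family attributes printed in the log
// (VERSIONS => 3, BLOOMFILTER => ROWCOL, IN_MEMORY => true, BLOCKSIZE => 8192,
// DATA_BLOCK_ENCODING => ROW_INDEX_V1) via the public HBase client builders.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static void main(String[] args) {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8192)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .build();

    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(info)
        .build();

    System.out.println(td);  // prints a descriptor string in the same form as the log lines above
  }
}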
2024-11-14T06:45:51,208 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731566751207Disabling compacts and flushes for region at 1731566751207Disabling writes for close at 1731566751207Writing region close event to WAL at 1731566751207Closed at 1731566751207 2024-11-14T06:45:51,209 WARN [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/data/master/store/.initializing 2024-11-14T06:45:51,209 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/WALs/20680646cf8a,45281,1731566751068 2024-11-14T06:45:51,212 INFO [master/20680646cf8a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=20680646cf8a%2C45281%2C1731566751068, suffix=, logDir=hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/WALs/20680646cf8a,45281,1731566751068, archiveDir=hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/oldWALs, maxLogs=10 2024-11-14T06:45:51,212 INFO [master/20680646cf8a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C45281%2C1731566751068.1731566751212 2024-11-14T06:45:51,218 INFO [master/20680646cf8a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/WALs/20680646cf8a,45281,1731566751068/20680646cf8a%2C45281%2C1731566751068.1731566751212 2024-11-14T06:45:51,221 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39031:39031),(127.0.0.1/127.0.0.1:37879:37879)] 2024-11-14T06:45:51,224 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T06:45:51,224 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:45:51,225 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:45:51,225 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:45:51,226 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:45:51,228 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T06:45:51,228 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:51,229 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:45:51,229 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:45:51,231 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T06:45:51,231 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:51,231 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:45:51,231 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:45:51,234 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T06:45:51,234 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:51,235 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:45:51,236 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:45:51,238 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T06:45:51,238 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:51,238 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:45:51,238 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:45:51,239 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:45:51,240 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:45:51,242 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:45:51,242 DEBUG [master/20680646cf8a:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:45:51,243 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T06:45:51,244 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:45:51,247 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T06:45:51,248 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=708140, jitterRate=-0.09955455362796783}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T06:45:51,249 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731566751225Initializing all the Stores at 1731566751226 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566751226Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566751226Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566751226Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566751226Cleaning up temporary data from old regions at 1731566751242 (+16 ms)Region opened successfully at 1731566751249 (+7 ms) 2024-11-14T06:45:51,250 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T06:45:51,255 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72d83e84, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=20680646cf8a/172.17.0.2:0 2024-11-14T06:45:51,256 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T06:45:51,256 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T06:45:51,256 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T06:45:51,256 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T06:45:51,257 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T06:45:51,258 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T06:45:51,258 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T06:45:51,260 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T06:45:51,262 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T06:45:51,263 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T06:45:51,263 INFO [master/20680646cf8a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T06:45:51,264 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T06:45:51,265 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T06:45:51,265 INFO [master/20680646cf8a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T06:45:51,266 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T06:45:51,267 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T06:45:51,268 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T06:45:51,269 DEBUG 
[master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T06:45:51,271 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T06:45:51,272 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T06:45:51,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35229-0x1003cfb29b80001, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T06:45:51,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T06:45:51,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35229-0x1003cfb29b80001, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:51,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:51,273 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=20680646cf8a,45281,1731566751068, sessionid=0x1003cfb29b80000, setting cluster-up flag (Was=false) 2024-11-14T06:45:51,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35229-0x1003cfb29b80001, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:51,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:51,278 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T06:45:51,279 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=20680646cf8a,45281,1731566751068 2024-11-14T06:45:51,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35229-0x1003cfb29b80001, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:51,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:51,285 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T06:45:51,286 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=20680646cf8a,45281,1731566751068 2024-11-14T06:45:51,288 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T06:45:51,290 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T06:45:51,290 INFO [master/20680646cf8a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T06:45:51,290 INFO [master/20680646cf8a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T06:45:51,290 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 20680646cf8a,45281,1731566751068 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T06:45:51,292 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:45:51,292 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:45:51,292 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:45:51,292 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:45:51,292 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/20680646cf8a:0, corePoolSize=10, maxPoolSize=10 2024-11-14T06:45:51,292 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:51,293 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/20680646cf8a:0, corePoolSize=2, maxPoolSize=2 2024-11-14T06:45:51,293 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/20680646cf8a:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T06:45:51,293 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731566781293 2024-11-14T06:45:51,293 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T06:45:51,294 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T06:45:51,294 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T06:45:51,294 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T06:45:51,294 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T06:45:51,294 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T06:45:51,294 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:51,294 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T06:45:51,294 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T06:45:51,294 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T06:45:51,295 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T06:45:51,295 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T06:45:51,295 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T06:45:51,295 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T06:45:51,295 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.large.0-1731566751295,5,FailOnTimeoutGroup] 2024-11-14T06:45:51,295 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.small.0-1731566751295,5,FailOnTimeoutGroup] 2024-11-14T06:45:51,295 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:51,295 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T06:45:51,295 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:51,296 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:51,296 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:51,296 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T06:45:51,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32789 is added to blk_1073741831_1007 (size=1321) 2024-11-14T06:45:51,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741831_1007 (size=1321) 2024-11-14T06:45:51,304 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T06:45:51,305 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a 2024-11-14T06:45:51,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741832_1008 (size=32) 2024-11-14T06:45:51,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32789 is added to blk_1073741832_1008 (size=32) 2024-11-14T06:45:51,313 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:45:51,315 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T06:45:51,316 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T06:45:51,317 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:51,317 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:45:51,317 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T06:45:51,319 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T06:45:51,319 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:51,320 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:45:51,320 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T06:45:51,322 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T06:45:51,322 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:51,322 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:45:51,323 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T06:45:51,324 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T06:45:51,324 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:51,325 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:45:51,325 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T06:45:51,326 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/data/hbase/meta/1588230740 2024-11-14T06:45:51,326 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/data/hbase/meta/1588230740 2024-11-14T06:45:51,328 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T06:45:51,328 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T06:45:51,329 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-14T06:45:51,330 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T06:45:51,333 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T06:45:51,334 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=766727, jitterRate=-0.02505733072757721}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T06:45:51,335 INFO [RS:0;20680646cf8a:35229 {}] regionserver.HRegionServer(746): ClusterId : 04311779-154e-4efd-a7ce-a2ce8a8f448b 2024-11-14T06:45:51,335 DEBUG [RS:0;20680646cf8a:35229 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T06:45:51,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731566751313Initializing all the Stores at 1731566751314 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566751314Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566751315 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566751315Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566751315Cleaning up temporary data from old regions at 1731566751328 (+13 ms)Region opened successfully at 1731566751335 (+7 ms) 2024-11-14T06:45:51,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T06:45:51,335 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T06:45:51,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T06:45:51,336 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T06:45:51,336 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T06:45:51,336 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed 
hbase:meta,,1.1588230740 2024-11-14T06:45:51,336 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731566751335Disabling compacts and flushes for region at 1731566751335Disabling writes for close at 1731566751336 (+1 ms)Writing region close event to WAL at 1731566751336Closed at 1731566751336 2024-11-14T06:45:51,336 DEBUG [RS:0;20680646cf8a:35229 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T06:45:51,336 DEBUG [RS:0;20680646cf8a:35229 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T06:45:51,338 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T06:45:51,338 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T06:45:51,338 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T06:45:51,338 DEBUG [RS:0;20680646cf8a:35229 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T06:45:51,339 DEBUG [RS:0;20680646cf8a:35229 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52d5f963, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=20680646cf8a/172.17.0.2:0 2024-11-14T06:45:51,340 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T06:45:51,341 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T06:45:51,352 DEBUG [RS:0;20680646cf8a:35229 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;20680646cf8a:35229 2024-11-14T06:45:51,352 INFO [RS:0;20680646cf8a:35229 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T06:45:51,353 INFO [RS:0;20680646cf8a:35229 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T06:45:51,353 DEBUG [RS:0;20680646cf8a:35229 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-14T06:45:51,354 INFO [RS:0;20680646cf8a:35229 {}] regionserver.HRegionServer(2659): reportForDuty to master=20680646cf8a,45281,1731566751068 with port=35229, startcode=1731566751114 2024-11-14T06:45:51,354 DEBUG [RS:0;20680646cf8a:35229 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T06:45:51,356 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37165, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T06:45:51,357 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45281 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 20680646cf8a,35229,1731566751114 2024-11-14T06:45:51,357 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45281 {}] master.ServerManager(517): Registering regionserver=20680646cf8a,35229,1731566751114 2024-11-14T06:45:51,359 DEBUG [RS:0;20680646cf8a:35229 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a 2024-11-14T06:45:51,359 DEBUG [RS:0;20680646cf8a:35229 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36135 2024-11-14T06:45:51,359 DEBUG [RS:0;20680646cf8a:35229 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T06:45:51,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T06:45:51,361 DEBUG [RS:0;20680646cf8a:35229 {}] zookeeper.ZKUtil(111): regionserver:35229-0x1003cfb29b80001, quorum=127.0.0.1:57823, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/20680646cf8a,35229,1731566751114 2024-11-14T06:45:51,361 WARN [RS:0;20680646cf8a:35229 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T06:45:51,361 INFO [RS:0;20680646cf8a:35229 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T06:45:51,362 DEBUG [RS:0;20680646cf8a:35229 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/WALs/20680646cf8a,35229,1731566751114 2024-11-14T06:45:51,362 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [20680646cf8a,35229,1731566751114] 2024-11-14T06:45:51,365 INFO [RS:0;20680646cf8a:35229 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T06:45:51,368 INFO [RS:0;20680646cf8a:35229 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T06:45:51,368 INFO [RS:0;20680646cf8a:35229 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T06:45:51,368 INFO [RS:0;20680646cf8a:35229 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-14T06:45:51,369 INFO [RS:0;20680646cf8a:35229 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T06:45:51,370 INFO [RS:0;20680646cf8a:35229 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T06:45:51,370 INFO [RS:0;20680646cf8a:35229 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:51,370 DEBUG [RS:0;20680646cf8a:35229 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:51,370 DEBUG [RS:0;20680646cf8a:35229 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:51,370 DEBUG [RS:0;20680646cf8a:35229 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:51,371 DEBUG [RS:0;20680646cf8a:35229 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:51,371 DEBUG [RS:0;20680646cf8a:35229 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:51,371 DEBUG [RS:0;20680646cf8a:35229 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/20680646cf8a:0, corePoolSize=2, maxPoolSize=2 2024-11-14T06:45:51,371 DEBUG [RS:0;20680646cf8a:35229 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:51,371 DEBUG [RS:0;20680646cf8a:35229 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:51,371 DEBUG [RS:0;20680646cf8a:35229 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:51,371 DEBUG [RS:0;20680646cf8a:35229 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:51,371 DEBUG [RS:0;20680646cf8a:35229 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:51,371 DEBUG [RS:0;20680646cf8a:35229 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:51,371 DEBUG [RS:0;20680646cf8a:35229 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/20680646cf8a:0, corePoolSize=3, maxPoolSize=3 2024-11-14T06:45:51,372 DEBUG [RS:0;20680646cf8a:35229 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0, corePoolSize=3, maxPoolSize=3 2024-11-14T06:45:51,373 INFO [RS:0;20680646cf8a:35229 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-14T06:45:51,373 INFO [RS:0;20680646cf8a:35229 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:51,373 INFO [RS:0;20680646cf8a:35229 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:51,373 INFO [RS:0;20680646cf8a:35229 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:51,373 INFO [RS:0;20680646cf8a:35229 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:51,373 INFO [RS:0;20680646cf8a:35229 {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,35229,1731566751114-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T06:45:51,387 INFO [RS:0;20680646cf8a:35229 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T06:45:51,387 INFO [RS:0;20680646cf8a:35229 {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,35229,1731566751114-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:51,388 INFO [RS:0;20680646cf8a:35229 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:51,388 INFO [RS:0;20680646cf8a:35229 {}] regionserver.Replication(171): 20680646cf8a,35229,1731566751114 started 2024-11-14T06:45:51,401 INFO [RS:0;20680646cf8a:35229 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:51,402 INFO [RS:0;20680646cf8a:35229 {}] regionserver.HRegionServer(1482): Serving as 20680646cf8a,35229,1731566751114, RpcServer on 20680646cf8a/172.17.0.2:35229, sessionid=0x1003cfb29b80001 2024-11-14T06:45:51,402 DEBUG [RS:0;20680646cf8a:35229 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T06:45:51,402 DEBUG [RS:0;20680646cf8a:35229 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 20680646cf8a,35229,1731566751114 2024-11-14T06:45:51,402 DEBUG [RS:0;20680646cf8a:35229 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '20680646cf8a,35229,1731566751114' 2024-11-14T06:45:51,402 DEBUG [RS:0;20680646cf8a:35229 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T06:45:51,403 DEBUG [RS:0;20680646cf8a:35229 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T06:45:51,403 DEBUG [RS:0;20680646cf8a:35229 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T06:45:51,403 DEBUG [RS:0;20680646cf8a:35229 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T06:45:51,403 DEBUG [RS:0;20680646cf8a:35229 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 20680646cf8a,35229,1731566751114 2024-11-14T06:45:51,403 DEBUG [RS:0;20680646cf8a:35229 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '20680646cf8a,35229,1731566751114' 2024-11-14T06:45:51,403 DEBUG [RS:0;20680646cf8a:35229 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T06:45:51,404 DEBUG 
[RS:0;20680646cf8a:35229 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T06:45:51,404 DEBUG [RS:0;20680646cf8a:35229 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T06:45:51,404 INFO [RS:0;20680646cf8a:35229 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T06:45:51,404 INFO [RS:0;20680646cf8a:35229 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T06:45:51,492 WARN [20680646cf8a:45281 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-14T06:45:51,509 INFO [RS:0;20680646cf8a:35229 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=20680646cf8a%2C35229%2C1731566751114, suffix=, logDir=hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/WALs/20680646cf8a,35229,1731566751114, archiveDir=hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/oldWALs, maxLogs=32 2024-11-14T06:45:51,513 INFO [RS:0;20680646cf8a:35229 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C35229%2C1731566751114.1731566751513 2024-11-14T06:45:51,521 INFO [RS:0;20680646cf8a:35229 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/WALs/20680646cf8a,35229,1731566751114/20680646cf8a%2C35229%2C1731566751114.1731566751513 2024-11-14T06:45:51,526 DEBUG [RS:0;20680646cf8a:35229 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39031:39031),(127.0.0.1/127.0.0.1:37879:37879)] 2024-11-14T06:45:51,742 DEBUG [20680646cf8a:45281 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T06:45:51,743 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=20680646cf8a,35229,1731566751114 2024-11-14T06:45:51,747 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 20680646cf8a,35229,1731566751114, state=OPENING 2024-11-14T06:45:51,749 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T06:45:51,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35229-0x1003cfb29b80001, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:51,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:51,753 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T06:45:51,753 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:45:51,753 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:45:51,753 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=20680646cf8a,35229,1731566751114}] 2024-11-14T06:45:51,908 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T06:45:51,914 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36613, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T06:45:51,919 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T06:45:51,920 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T06:45:51,923 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=20680646cf8a%2C35229%2C1731566751114.meta, suffix=.meta, logDir=hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/WALs/20680646cf8a,35229,1731566751114, archiveDir=hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/oldWALs, maxLogs=32 2024-11-14T06:45:51,927 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C35229%2C1731566751114.meta.1731566751926.meta 2024-11-14T06:45:51,933 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/WALs/20680646cf8a,35229,1731566751114/20680646cf8a%2C35229%2C1731566751114.meta.1731566751926.meta 2024-11-14T06:45:51,934 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39031:39031),(127.0.0.1/127.0.0.1:37879:37879)] 2024-11-14T06:45:51,938 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T06:45:51,938 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T06:45:51,938 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T06:45:51,938 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-14T06:45:51,938 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T06:45:51,938 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:45:51,938 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T06:45:51,939 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T06:45:51,941 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T06:45:51,942 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T06:45:51,942 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:51,943 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:45:51,943 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T06:45:51,944 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T06:45:51,944 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:51,944 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:45:51,944 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T06:45:51,945 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T06:45:51,945 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:51,946 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:45:51,946 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T06:45:51,947 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T06:45:51,947 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:51,947 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-14T06:45:51,948 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T06:45:51,949 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/data/hbase/meta/1588230740 2024-11-14T06:45:51,950 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/data/hbase/meta/1588230740 2024-11-14T06:45:51,952 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T06:45:51,952 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T06:45:51,952 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T06:45:51,954 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T06:45:51,955 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=725419, jitterRate=-0.07758234441280365}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T06:45:51,955 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T06:45:51,957 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731566751939Writing region info on filesystem at 1731566751939Initializing all the Stores at 1731566751940 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566751940Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566751941 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566751941Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566751941Cleaning up temporary data from old regions at 1731566751952 (+11 ms)Running coprocessor post-open hooks at 1731566751955 (+3 ms)Region opened successfully at 1731566751956 (+1 ms) 2024-11-14T06:45:51,958 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731566751907 2024-11-14T06:45:51,961 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T06:45:51,961 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T06:45:51,962 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=20680646cf8a,35229,1731566751114 2024-11-14T06:45:51,963 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 20680646cf8a,35229,1731566751114, state=OPEN 2024-11-14T06:45:51,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T06:45:51,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35229-0x1003cfb29b80001, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T06:45:51,966 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=20680646cf8a,35229,1731566751114 2024-11-14T06:45:51,966 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:45:51,966 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:45:51,969 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T06:45:51,969 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=20680646cf8a,35229,1731566751114 in 213 msec 2024-11-14T06:45:51,972 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T06:45:51,972 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 631 msec 2024-11-14T06:45:51,973 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T06:45:51,973 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T06:45:51,974 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T06:45:51,974 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=20680646cf8a,35229,1731566751114, seqNum=-1] 2024-11-14T06:45:51,975 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T06:45:51,976 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54203, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T06:45:51,983 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 692 msec 2024-11-14T06:45:51,983 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731566751983, completionTime=-1 2024-11-14T06:45:51,983 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T06:45:51,983 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-14T06:45:51,986 INFO [master/20680646cf8a:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-14T06:45:51,986 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731566811986 2024-11-14T06:45:51,986 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731566871986 2024-11-14T06:45:51,986 INFO [master/20680646cf8a:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-14T06:45:51,986 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,45281,1731566751068-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:51,986 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,45281,1731566751068-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:51,986 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,45281,1731566751068-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:51,986 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-20680646cf8a:45281, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T06:45:51,986 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:51,987 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:51,989 DEBUG [master/20680646cf8a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T06:45:51,991 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.842sec 2024-11-14T06:45:51,991 INFO [master/20680646cf8a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T06:45:51,991 INFO [master/20680646cf8a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T06:45:51,991 INFO [master/20680646cf8a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T06:45:51,991 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-14T06:45:51,991 INFO [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T06:45:51,991 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,45281,1731566751068-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T06:45:51,991 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,45281,1731566751068-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T06:45:51,994 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T06:45:51,994 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T06:45:51,994 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,45281,1731566751068-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T06:45:52,035 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e5835fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T06:45:52,035 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 20680646cf8a,45281,-1 for getting cluster id 2024-11-14T06:45:52,035 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T06:45:52,037 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '04311779-154e-4efd-a7ce-a2ce8a8f448b' 2024-11-14T06:45:52,038 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T06:45:52,038 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "04311779-154e-4efd-a7ce-a2ce8a8f448b" 2024-11-14T06:45:52,038 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14f7577d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T06:45:52,038 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [20680646cf8a,45281,-1] 2024-11-14T06:45:52,038 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T06:45:52,039 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:45:52,040 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58274, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T06:45:52,041 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@205bfd42, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T06:45:52,042 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T06:45:52,043 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=20680646cf8a,35229,1731566751114, seqNum=-1] 2024-11-14T06:45:52,043 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T06:45:52,045 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41002, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T06:45:52,047 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=20680646cf8a,45281,1731566751068 2024-11-14T06:45:52,047 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:45:52,050 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T06:45:52,050 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T06:45:52,051 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T06:45:52,051 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T06:45:52,051 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:45:52,051 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:45:52,051 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-14T06:45:52,051 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T06:45:52,051 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1469549761, stopped=false 2024-11-14T06:45:52,051 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=20680646cf8a,45281,1731566751068 2024-11-14T06:45:52,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35229-0x1003cfb29b80001, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T06:45:52,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T06:45:52,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35229-0x1003cfb29b80001, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:52,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:52,053 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T06:45:52,053 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-14T06:45:52,053 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T06:45:52,053 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:45:52,053 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:45:52,053 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35229-0x1003cfb29b80001, quorum=127.0.0.1:57823, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:45:52,053 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '20680646cf8a,35229,1731566751114' ***** 2024-11-14T06:45:52,053 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T06:45:52,053 INFO [RS:0;20680646cf8a:35229 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T06:45:52,054 INFO [RS:0;20680646cf8a:35229 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T06:45:52,054 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T06:45:52,054 INFO [RS:0;20680646cf8a:35229 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T06:45:52,054 INFO [RS:0;20680646cf8a:35229 {}] regionserver.HRegionServer(959): stopping server 20680646cf8a,35229,1731566751114 2024-11-14T06:45:52,054 INFO [RS:0;20680646cf8a:35229 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T06:45:52,054 INFO [RS:0;20680646cf8a:35229 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;20680646cf8a:35229. 2024-11-14T06:45:52,054 DEBUG [RS:0;20680646cf8a:35229 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T06:45:52,054 DEBUG [RS:0;20680646cf8a:35229 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:45:52,054 INFO [RS:0;20680646cf8a:35229 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-14T06:45:52,054 INFO [RS:0;20680646cf8a:35229 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T06:45:52,054 INFO [RS:0;20680646cf8a:35229 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-14T06:45:52,054 INFO [RS:0;20680646cf8a:35229 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T06:45:52,055 INFO [RS:0;20680646cf8a:35229 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-14T06:45:52,055 DEBUG [RS:0;20680646cf8a:35229 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-14T06:45:52,055 DEBUG [RS:0;20680646cf8a:35229 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-14T06:45:52,055 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T06:45:52,055 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T06:45:52,055 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T06:45:52,055 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T06:45:52,055 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T06:45:52,055 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-14T06:45:52,072 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/data/hbase/meta/1588230740/.tmp/ns/a8b69a57cb47406d82aaa5cfaf71a098 is 43, key is default/ns:d/1731566751976/Put/seqid=0 2024-11-14T06:45:52,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32789 is added to blk_1073741835_1011 (size=5153) 2024-11-14T06:45:52,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741835_1011 (size=5153) 2024-11-14T06:45:52,078 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/data/hbase/meta/1588230740/.tmp/ns/a8b69a57cb47406d82aaa5cfaf71a098 2024-11-14T06:45:52,086 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/data/hbase/meta/1588230740/.tmp/ns/a8b69a57cb47406d82aaa5cfaf71a098 as hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/data/hbase/meta/1588230740/ns/a8b69a57cb47406d82aaa5cfaf71a098 2024-11-14T06:45:52,093 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/data/hbase/meta/1588230740/ns/a8b69a57cb47406d82aaa5cfaf71a098, entries=2, sequenceid=6, filesize=5.0 K 2024-11-14T06:45:52,094 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 39ms, sequenceid=6, compaction requested=false 2024-11-14T06:45:52,099 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-14T06:45:52,100 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T06:45:52,100 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T06:45:52,100 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731566752055Running coprocessor pre-close hooks at 1731566752055Disabling compacts and flushes for region at 1731566752055Disabling writes for close at 1731566752055Obtaining lock to block concurrent updates at 1731566752055Preparing flush snapshotting stores in 1588230740 at 1731566752055Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731566752055Flushing stores of hbase:meta,,1.1588230740 at 1731566752056 (+1 ms)Flushing 1588230740/ns: creating writer at 1731566752056Flushing 1588230740/ns: appending metadata at 1731566752071 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731566752071Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ad4901a: reopening flushed file at 1731566752085 (+14 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 39ms, sequenceid=6, compaction requested=false at 1731566752094 (+9 ms)Writing region close event to WAL at 1731566752095 (+1 ms)Running coprocessor post-close hooks at 1731566752100 (+5 ms)Closed at 1731566752100 2024-11-14T06:45:52,101 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T06:45:52,255 INFO [RS:0;20680646cf8a:35229 {}] regionserver.HRegionServer(976): stopping server 20680646cf8a,35229,1731566751114; all regions closed. 
2024-11-14T06:45:52,256 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:52,256 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:52,256 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:52,256 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:52,256 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:52,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32789 is added to blk_1073741834_1010 (size=1152) 2024-11-14T06:45:52,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741834_1010 (size=1152) 2024-11-14T06:45:52,262 DEBUG [RS:0;20680646cf8a:35229 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/oldWALs 2024-11-14T06:45:52,262 INFO [RS:0;20680646cf8a:35229 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 20680646cf8a%2C35229%2C1731566751114.meta:.meta(num 1731566751926) 2024-11-14T06:45:52,262 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:52,262 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:52,262 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:52,263 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:52,263 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:52,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32789 is added to blk_1073741833_1009 (size=93) 2024-11-14T06:45:52,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741833_1009 (size=93) 2024-11-14T06:45:52,268 DEBUG [RS:0;20680646cf8a:35229 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/oldWALs 2024-11-14T06:45:52,268 INFO [RS:0;20680646cf8a:35229 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 20680646cf8a%2C35229%2C1731566751114:(num 1731566751513) 2024-11-14T06:45:52,268 DEBUG [RS:0;20680646cf8a:35229 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:45:52,268 INFO [RS:0;20680646cf8a:35229 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T06:45:52,268 INFO [RS:0;20680646cf8a:35229 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T06:45:52,268 INFO [RS:0;20680646cf8a:35229 {}] hbase.ChoreService(370): Chore service for: regionserver/20680646cf8a:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-14T06:45:52,268 INFO [RS:0;20680646cf8a:35229 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T06:45:52,268 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-14T06:45:52,269 INFO [RS:0;20680646cf8a:35229 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35229 2024-11-14T06:45:52,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35229-0x1003cfb29b80001, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/20680646cf8a,35229,1731566751114 2024-11-14T06:45:52,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T06:45:52,270 INFO [RS:0;20680646cf8a:35229 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T06:45:52,271 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [20680646cf8a,35229,1731566751114] 2024-11-14T06:45:52,271 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/20680646cf8a,35229,1731566751114 already deleted, retry=false 2024-11-14T06:45:52,272 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 20680646cf8a,35229,1731566751114 expired; onlineServers=0 2024-11-14T06:45:52,272 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '20680646cf8a,45281,1731566751068' ***** 2024-11-14T06:45:52,272 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T06:45:52,272 INFO [M:0;20680646cf8a:45281 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T06:45:52,272 INFO [M:0;20680646cf8a:45281 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T06:45:52,272 DEBUG [M:0;20680646cf8a:45281 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T06:45:52,272 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-14T06:45:52,272 DEBUG [M:0;20680646cf8a:45281 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T06:45:52,272 DEBUG [master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.large.0-1731566751295 {}] cleaner.HFileCleaner(306): Exit Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.large.0-1731566751295,5,FailOnTimeoutGroup] 2024-11-14T06:45:52,272 DEBUG [master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.small.0-1731566751295 {}] cleaner.HFileCleaner(306): Exit Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.small.0-1731566751295,5,FailOnTimeoutGroup] 2024-11-14T06:45:52,272 INFO [M:0;20680646cf8a:45281 {}] hbase.ChoreService(370): Chore service for: master/20680646cf8a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T06:45:52,272 INFO [M:0;20680646cf8a:45281 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T06:45:52,272 DEBUG [M:0;20680646cf8a:45281 {}] master.HMaster(1795): Stopping service threads 2024-11-14T06:45:52,272 INFO [M:0;20680646cf8a:45281 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T06:45:52,272 INFO [M:0;20680646cf8a:45281 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T06:45:52,273 INFO [M:0;20680646cf8a:45281 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T06:45:52,273 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T06:45:52,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T06:45:52,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:52,273 DEBUG [M:0;20680646cf8a:45281 {}] zookeeper.ZKUtil(347): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T06:45:52,273 WARN [M:0;20680646cf8a:45281 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T06:45:52,274 INFO [M:0;20680646cf8a:45281 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/.lastflushedseqids 2024-11-14T06:45:52,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741836_1012 (size=99) 2024-11-14T06:45:52,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32789 is added to blk_1073741836_1012 (size=99) 2024-11-14T06:45:52,282 INFO [M:0;20680646cf8a:45281 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T06:45:52,282 INFO [M:0;20680646cf8a:45281 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T06:45:52,282 DEBUG [M:0;20680646cf8a:45281 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T06:45:52,282 INFO [M:0;20680646cf8a:45281 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:45:52,282 DEBUG [M:0;20680646cf8a:45281 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:45:52,282 DEBUG [M:0;20680646cf8a:45281 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T06:45:52,282 DEBUG [M:0;20680646cf8a:45281 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:45:52,282 INFO [M:0;20680646cf8a:45281 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-14T06:45:52,300 DEBUG [M:0;20680646cf8a:45281 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/83bfdfc5b48b484db18fca2df71ee123 is 82, key is hbase:meta,,1/info:regioninfo/1731566751962/Put/seqid=0 2024-11-14T06:45:52,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741837_1013 (size=5672) 2024-11-14T06:45:52,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32789 is added to blk_1073741837_1013 (size=5672) 2024-11-14T06:45:52,306 INFO [M:0;20680646cf8a:45281 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/83bfdfc5b48b484db18fca2df71ee123 2024-11-14T06:45:52,327 DEBUG [M:0;20680646cf8a:45281 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9dea05a80c62410eb30ea2a311b72158 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731566751982/Put/seqid=0 2024-11-14T06:45:52,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32789 is added to blk_1073741838_1014 (size=5275) 2024-11-14T06:45:52,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741838_1014 (size=5275) 2024-11-14T06:45:52,333 INFO [M:0;20680646cf8a:45281 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9dea05a80c62410eb30ea2a311b72158 2024-11-14T06:45:52,355 DEBUG [M:0;20680646cf8a:45281 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f73b7767f08e46d9b7dbf1b2cc1a6068 is 69, key is 20680646cf8a,35229,1731566751114/rs:state/1731566751357/Put/seqid=0 2024-11-14T06:45:52,360 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741839_1015 (size=5156) 2024-11-14T06:45:52,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32789 is added to blk_1073741839_1015 (size=5156) 2024-11-14T06:45:52,361 INFO [M:0;20680646cf8a:45281 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f73b7767f08e46d9b7dbf1b2cc1a6068 2024-11-14T06:45:52,371 INFO [RS:0;20680646cf8a:35229 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T06:45:52,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35229-0x1003cfb29b80001, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:45:52,371 INFO [RS:0;20680646cf8a:35229 {}] regionserver.HRegionServer(1031): Exiting; stopping=20680646cf8a,35229,1731566751114; zookeeper connection closed. 2024-11-14T06:45:52,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35229-0x1003cfb29b80001, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:45:52,372 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@29b73268 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@29b73268 2024-11-14T06:45:52,372 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T06:45:52,384 DEBUG [M:0;20680646cf8a:45281 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/157821df57484e44970ba7b08e4d54d9 is 52, key is load_balancer_on/state:d/1731566752049/Put/seqid=0 2024-11-14T06:45:52,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32789 is added to blk_1073741840_1016 (size=5056) 2024-11-14T06:45:52,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741840_1016 (size=5056) 2024-11-14T06:45:52,389 INFO [M:0;20680646cf8a:45281 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/157821df57484e44970ba7b08e4d54d9 2024-11-14T06:45:52,398 DEBUG [M:0;20680646cf8a:45281 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/83bfdfc5b48b484db18fca2df71ee123 as hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/83bfdfc5b48b484db18fca2df71ee123 2024-11-14T06:45:52,405 INFO [M:0;20680646cf8a:45281 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/83bfdfc5b48b484db18fca2df71ee123, entries=8, sequenceid=29, filesize=5.5 K 2024-11-14T06:45:52,407 DEBUG [M:0;20680646cf8a:45281 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9dea05a80c62410eb30ea2a311b72158 as hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9dea05a80c62410eb30ea2a311b72158 2024-11-14T06:45:52,413 INFO [M:0;20680646cf8a:45281 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9dea05a80c62410eb30ea2a311b72158, entries=3, sequenceid=29, filesize=5.2 K 2024-11-14T06:45:52,414 DEBUG [M:0;20680646cf8a:45281 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f73b7767f08e46d9b7dbf1b2cc1a6068 as hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f73b7767f08e46d9b7dbf1b2cc1a6068 2024-11-14T06:45:52,421 INFO [M:0;20680646cf8a:45281 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f73b7767f08e46d9b7dbf1b2cc1a6068, entries=1, sequenceid=29, filesize=5.0 K 2024-11-14T06:45:52,422 DEBUG [M:0;20680646cf8a:45281 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/157821df57484e44970ba7b08e4d54d9 as hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/157821df57484e44970ba7b08e4d54d9 2024-11-14T06:45:52,429 INFO [M:0;20680646cf8a:45281 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36135/user/jenkins/test-data/b1c58c6c-ec99-aaff-4668-ec92ef1bad6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/157821df57484e44970ba7b08e4d54d9, entries=1, sequenceid=29, filesize=4.9 K 2024-11-14T06:45:52,430 INFO [M:0;20680646cf8a:45281 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 148ms, sequenceid=29, compaction requested=false 2024-11-14T06:45:52,432 INFO [M:0;20680646cf8a:45281 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T06:45:52,432 DEBUG [M:0;20680646cf8a:45281 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731566752282Disabling compacts and flushes for region at 1731566752282Disabling writes for close at 1731566752282Obtaining lock to block concurrent updates at 1731566752282Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731566752282Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731566752283 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731566752284 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731566752284Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731566752299 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731566752299Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731566752312 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731566752327 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731566752327Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731566752340 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731566752354 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731566752354Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731566752367 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731566752383 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731566752383Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@25793cd7: reopening flushed file at 1731566752397 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@54b49d91: reopening flushed file at 1731566752405 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67c1f4d5: reopening flushed file at 1731566752413 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@217e09b7: reopening flushed file at 1731566752421 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 148ms, sequenceid=29, compaction requested=false at 1731566752430 (+9 ms)Writing region close event to WAL at 1731566752432 (+2 ms)Closed at 1731566752432 2024-11-14T06:45:52,433 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:52,433 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:52,433 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:52,434 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:52,434 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:45:52,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32789 is added to blk_1073741830_1006 (size=10311) 2024-11-14T06:45:52,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741830_1006 (size=10311) 2024-11-14T06:45:52,437 INFO [M:0;20680646cf8a:45281 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-14T06:45:52,437 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T06:45:52,437 INFO [M:0;20680646cf8a:45281 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45281 2024-11-14T06:45:52,437 INFO [M:0;20680646cf8a:45281 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T06:45:52,539 INFO [M:0;20680646cf8a:45281 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T06:45:52,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:45:52,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x1003cfb29b80000, quorum=127.0.0.1:57823, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:45:52,544 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@c708570{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:45:52,545 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7ab06e68{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:45:52,545 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:45:52,545 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d944f53{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:45:52,546 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@463a48f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/hadoop.log.dir/,STOPPED} 2024-11-14T06:45:52,548 WARN [BP-2101863453-172.17.0.2-1731566750464 heartbeating to localhost/127.0.0.1:36135 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T06:45:52,548 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T06:45:52,548 WARN [BP-2101863453-172.17.0.2-1731566750464 heartbeating to localhost/127.0.0.1:36135 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2101863453-172.17.0.2-1731566750464 (Datanode Uuid b10daefd-dc88-4998-b527-19d87582c32f) service to localhost/127.0.0.1:36135 2024-11-14T06:45:52,548 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T06:45:52,549 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/cluster_443e9163-05dd-5154-03ea-6139b5073b4b/data/data3/current/BP-2101863453-172.17.0.2-1731566750464 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:45:52,549 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/cluster_443e9163-05dd-5154-03ea-6139b5073b4b/data/data4/current/BP-2101863453-172.17.0.2-1731566750464 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:45:52,549 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T06:45:52,551 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@30a1c2a3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:45:52,551 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4620cd8a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:45:52,551 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:45:52,552 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@607b9bc6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:45:52,552 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@55f7876e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/hadoop.log.dir/,STOPPED} 2024-11-14T06:45:52,553 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T06:45:52,553 WARN [BP-2101863453-172.17.0.2-1731566750464 heartbeating to localhost/127.0.0.1:36135 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T06:45:52,553 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T06:45:52,553 WARN [BP-2101863453-172.17.0.2-1731566750464 heartbeating to localhost/127.0.0.1:36135 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2101863453-172.17.0.2-1731566750464 (Datanode Uuid 125bfb23-bbac-4b7a-a181-69bface61140) service to localhost/127.0.0.1:36135 2024-11-14T06:45:52,553 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/cluster_443e9163-05dd-5154-03ea-6139b5073b4b/data/data1/current/BP-2101863453-172.17.0.2-1731566750464 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:45:52,554 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/cluster_443e9163-05dd-5154-03ea-6139b5073b4b/data/data2/current/BP-2101863453-172.17.0.2-1731566750464 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:45:52,554 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T06:45:52,559 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@52b0c086{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T06:45:52,559 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4d9de743{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:45:52,559 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:45:52,560 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a3c3ceb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:45:52,560 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3bd9c5b4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/hadoop.log.dir/,STOPPED} 2024-11-14T06:45:52,565 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T06:45:52,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T06:45:52,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T06:45:52,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/hadoop.log.dir so I do NOT create it in target/test-data/68f06828-ae2b-f982-0219-24a5c538376d 2024-11-14T06:45:52,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1917aee2-48ea-ad8f-5abd-e62492081a8c/hadoop.tmp.dir so I do NOT create it in target/test-data/68f06828-ae2b-f982-0219-24a5c538376d 2024-11-14T06:45:52,584 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50, deleteOnExit=true 2024-11-14T06:45:52,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T06:45:52,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/test.cache.data in system properties and HBase conf 2024-11-14T06:45:52,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T06:45:52,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/hadoop.log.dir in system properties and HBase conf 2024-11-14T06:45:52,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T06:45:52,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T06:45:52,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T06:45:52,585 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-14T06:45:52,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T06:45:52,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T06:45:52,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T06:45:52,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T06:45:52,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T06:45:52,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T06:45:52,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T06:45:52,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T06:45:52,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T06:45:52,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/nfs.dump.dir in system properties and HBase conf 2024-11-14T06:45:52,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/java.io.tmpdir in system properties and HBase conf 2024-11-14T06:45:52,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T06:45:52,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T06:45:52,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T06:45:52,598 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T06:45:52,649 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:45:52,655 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:45:52,657 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:45:52,657 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:45:52,657 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T06:45:52,658 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:45:52,659 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16369da1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:45:52,660 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e7025d4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:45:52,758 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7cf515b1{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/java.io.tmpdir/jetty-localhost-33321-hadoop-hdfs-3_4_1-tests_jar-_-any-3819236884343171437/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T06:45:52,759 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@c6abea1{HTTP/1.1, (http/1.1)}{localhost:33321} 2024-11-14T06:45:52,759 INFO [Time-limited test {}] server.Server(415): Started @104355ms 2024-11-14T06:45:52,770 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T06:45:52,821 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:45:52,827 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:45:52,829 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:45:52,829 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:45:52,829 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T06:45:52,830 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35a03ba9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:45:52,830 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bb4f47b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:45:52,923 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f1f9cf1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/java.io.tmpdir/jetty-localhost-38737-hadoop-hdfs-3_4_1-tests_jar-_-any-12282402429946140360/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:45:52,924 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2df55a{HTTP/1.1, (http/1.1)}{localhost:38737} 2024-11-14T06:45:52,924 INFO [Time-limited test {}] server.Server(415): Started @104520ms 2024-11-14T06:45:52,926 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T06:45:52,960 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:45:52,965 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:45:52,966 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:45:52,966 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:45:52,966 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T06:45:52,967 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d3d4ef0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:45:52,967 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39a69c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:45:52,995 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data1/current/BP-1678146812-172.17.0.2-1731566752608/current, will proceed with Du for space computation calculation, 2024-11-14T06:45:52,996 WARN [Thread-658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data2/current/BP-1678146812-172.17.0.2-1731566752608/current, will proceed with Du for space computation calculation, 2024-11-14T06:45:53,014 WARN [Thread-636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T06:45:53,017 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb46f7bc038a76b12 with lease ID 0xba885f94c88789a2: Processing first storage report for DS-6d8175bd-2995-46c7-bac3-4065a1e8b823 from datanode DatanodeRegistration(127.0.0.1:36149, datanodeUuid=4be95ec5-4913-4705-b42c-31dd65c3c4f4, infoPort=46257, infoSecurePort=0, ipcPort=34791, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608) 2024-11-14T06:45:53,017 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb46f7bc038a76b12 with lease ID 0xba885f94c88789a2: from storage DS-6d8175bd-2995-46c7-bac3-4065a1e8b823 node DatanodeRegistration(127.0.0.1:36149, datanodeUuid=4be95ec5-4913-4705-b42c-31dd65c3c4f4, infoPort=46257, infoSecurePort=0, ipcPort=34791, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T06:45:53,017 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb46f7bc038a76b12 with lease ID 0xba885f94c88789a2: Processing first storage report for DS-2b8bb2ca-f867-4ffb-a70a-e683ebb0a54c from datanode DatanodeRegistration(127.0.0.1:36149, datanodeUuid=4be95ec5-4913-4705-b42c-31dd65c3c4f4, infoPort=46257, infoSecurePort=0, ipcPort=34791, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608) 2024-11-14T06:45:53,017 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb46f7bc038a76b12 with lease ID 0xba885f94c88789a2: from storage DS-2b8bb2ca-f867-4ffb-a70a-e683ebb0a54c node DatanodeRegistration(127.0.0.1:36149, datanodeUuid=4be95ec5-4913-4705-b42c-31dd65c3c4f4, infoPort=46257, infoSecurePort=0, ipcPort=34791, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:45:53,071 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@52be898{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/java.io.tmpdir/jetty-localhost-44115-hadoop-hdfs-3_4_1-tests_jar-_-any-17090131224268083228/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:45:53,071 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@520d17ab{HTTP/1.1, (http/1.1)}{localhost:44115} 2024-11-14T06:45:53,071 INFO [Time-limited test {}] server.Server(415): Started @104668ms 2024-11-14T06:45:53,073 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-14T06:45:53,134 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data3/current/BP-1678146812-172.17.0.2-1731566752608/current, will proceed with Du for space computation calculation, 2024-11-14T06:45:53,134 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data4/current/BP-1678146812-172.17.0.2-1731566752608/current, will proceed with Du for space computation calculation, 2024-11-14T06:45:53,158 WARN [Thread-672 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T06:45:53,161 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x31aa11cd1702ecf1 with lease ID 0xba885f94c88789a3: Processing first storage report for DS-230efee1-675c-4a81-b43c-66956f7e849f from datanode DatanodeRegistration(127.0.0.1:43453, datanodeUuid=ff1689b7-6834-4e0f-9cff-2acd1fab3999, infoPort=46603, infoSecurePort=0, ipcPort=33767, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608) 2024-11-14T06:45:53,161 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x31aa11cd1702ecf1 with lease ID 0xba885f94c88789a3: from storage DS-230efee1-675c-4a81-b43c-66956f7e849f node DatanodeRegistration(127.0.0.1:43453, datanodeUuid=ff1689b7-6834-4e0f-9cff-2acd1fab3999, infoPort=46603, infoSecurePort=0, ipcPort=33767, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T06:45:53,161 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x31aa11cd1702ecf1 with lease ID 0xba885f94c88789a3: Processing first storage report for DS-4f4b6a52-61a1-4195-8aa0-896965efc107 from datanode DatanodeRegistration(127.0.0.1:43453, datanodeUuid=ff1689b7-6834-4e0f-9cff-2acd1fab3999, infoPort=46603, infoSecurePort=0, ipcPort=33767, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608) 2024-11-14T06:45:53,161 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x31aa11cd1702ecf1 with lease ID 0xba885f94c88789a3: from storage DS-4f4b6a52-61a1-4195-8aa0-896965efc107 node DatanodeRegistration(127.0.0.1:43453, datanodeUuid=ff1689b7-6834-4e0f-9cff-2acd1fab3999, infoPort=46603, infoSecurePort=0, ipcPort=33767, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:45:53,201 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d 2024-11-14T06:45:53,204 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/zookeeper_0, clientPort=50335, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T06:45:53,205 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50335 2024-11-14T06:45:53,206 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:45:53,208 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:45:53,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36149 is added to blk_1073741825_1001 (size=7) 2024-11-14T06:45:53,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43453 is added to blk_1073741825_1001 (size=7) 2024-11-14T06:45:53,220 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9 with version=8 2024-11-14T06:45:53,220 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/hbase-staging 2024-11-14T06:45:53,222 INFO [Time-limited test {}] client.ConnectionUtils(128): master/20680646cf8a:0 server-side Connection retries=45 2024-11-14T06:45:53,222 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:45:53,222 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T06:45:53,223 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T06:45:53,223 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:45:53,223 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T06:45:53,223 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T06:45:53,223 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T06:45:53,223 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38495 2024-11-14T06:45:53,225 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38495 connecting to ZooKeeper ensemble=127.0.0.1:50335 2024-11-14T06:45:53,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:384950x0, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T06:45:53,229 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38495-0x1003cfb32220000 connected 2024-11-14T06:45:53,244 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:45:53,247 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:45:53,250 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:45:53,250 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9, hbase.cluster.distributed=false 2024-11-14T06:45:53,252 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T06:45:53,253 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38495 2024-11-14T06:45:53,257 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38495 2024-11-14T06:45:53,257 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38495 2024-11-14T06:45:53,258 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38495 2024-11-14T06:45:53,258 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38495 2024-11-14T06:45:53,273 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/20680646cf8a:0 server-side Connection retries=45 2024-11-14T06:45:53,273 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:45:53,273 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T06:45:53,273 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T06:45:53,273 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:45:53,273 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T06:45:53,273 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T06:45:53,273 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T06:45:53,274 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39105 2024-11-14T06:45:53,275 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39105 connecting to ZooKeeper ensemble=127.0.0.1:50335 2024-11-14T06:45:53,276 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:45:53,277 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:45:53,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:391050x0, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T06:45:53,281 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39105-0x1003cfb32220001 connected 2024-11-14T06:45:53,281 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39105-0x1003cfb32220001, quorum=127.0.0.1:50335, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:45:53,282 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T06:45:53,284 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T06:45:53,284 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39105-0x1003cfb32220001, quorum=127.0.0.1:50335, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T06:45:53,285 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39105-0x1003cfb32220001, quorum=127.0.0.1:50335, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T06:45:53,287 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39105 2024-11-14T06:45:53,287 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39105 2024-11-14T06:45:53,288 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39105 2024-11-14T06:45:53,288 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39105 2024-11-14T06:45:53,288 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39105 
2024-11-14T06:45:53,299 DEBUG [M:0;20680646cf8a:38495 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;20680646cf8a:38495 2024-11-14T06:45:53,299 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/20680646cf8a,38495,1731566753222 2024-11-14T06:45:53,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T06:45:53,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1003cfb32220001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T06:45:53,301 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/20680646cf8a,38495,1731566753222 2024-11-14T06:45:53,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1003cfb32220001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T06:45:53,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:53,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1003cfb32220001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:53,302 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T06:45:53,303 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/20680646cf8a,38495,1731566753222 from backup master directory 2024-11-14T06:45:53,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/20680646cf8a,38495,1731566753222 2024-11-14T06:45:53,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T06:45:53,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1003cfb32220001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T06:45:53,304 WARN [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-14T06:45:53,304 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=20680646cf8a,38495,1731566753222 2024-11-14T06:45:53,313 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/hbase.id] with ID: 03b2ca6a-61ff-4686-b751-cc5729d8fbfc 2024-11-14T06:45:53,313 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/.tmp/hbase.id 2024-11-14T06:45:53,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36149 is added to blk_1073741826_1002 (size=42) 2024-11-14T06:45:53,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43453 is added to blk_1073741826_1002 (size=42) 2024-11-14T06:45:53,321 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/.tmp/hbase.id]:[hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/hbase.id] 2024-11-14T06:45:53,334 INFO [master/20680646cf8a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:45:53,334 INFO [master/20680646cf8a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T06:45:53,336 INFO [master/20680646cf8a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-14T06:45:53,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:53,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1003cfb32220001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:53,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36149 is added to blk_1073741827_1003 (size=196) 2024-11-14T06:45:53,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43453 is added to blk_1073741827_1003 (size=196) 2024-11-14T06:45:53,347 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T06:45:53,348 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T06:45:53,348 INFO [master/20680646cf8a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T06:45:53,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36149 is added to blk_1073741828_1004 (size=1189) 2024-11-14T06:45:53,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43453 is added to blk_1073741828_1004 (size=1189) 2024-11-14T06:45:53,360 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/data/master/store 2024-11-14T06:45:53,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43453 is added to blk_1073741829_1005 (size=34) 2024-11-14T06:45:53,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36149 is added to blk_1073741829_1005 (size=34) 2024-11-14T06:45:53,368 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:45:53,369 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T06:45:53,369 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:45:53,369 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:45:53,369 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T06:45:53,369 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:45:53,369 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T06:45:53,369 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731566753369Disabling compacts and flushes for region at 1731566753369Disabling writes for close at 1731566753369Writing region close event to WAL at 1731566753369Closed at 1731566753369 2024-11-14T06:45:53,370 WARN [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/data/master/store/.initializing 2024-11-14T06:45:53,370 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/WALs/20680646cf8a,38495,1731566753222 2024-11-14T06:45:53,372 INFO [master/20680646cf8a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=20680646cf8a%2C38495%2C1731566753222, suffix=, logDir=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/WALs/20680646cf8a,38495,1731566753222, archiveDir=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/oldWALs, maxLogs=10 2024-11-14T06:45:53,373 INFO [master/20680646cf8a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C38495%2C1731566753222.1731566753373 2024-11-14T06:45:53,374 INFO [regionserver/20680646cf8a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T06:45:53,379 INFO [master/20680646cf8a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/WALs/20680646cf8a,38495,1731566753222/20680646cf8a%2C38495%2C1731566753222.1731566753373 2024-11-14T06:45:53,380 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46603:46603),(127.0.0.1/127.0.0.1:46257:46257)] 2024-11-14T06:45:53,384 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T06:45:53,385 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:45:53,385 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:45:53,385 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:45:53,387 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:45:53,389 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T06:45:53,389 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:53,390 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:45:53,390 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:45:53,392 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T06:45:53,392 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:53,392 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:45:53,393 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:45:53,394 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; 
major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T06:45:53,394 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:53,395 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:45:53,395 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:45:53,397 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T06:45:53,397 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:53,398 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:45:53,398 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:45:53,399 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:45:53,399 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:45:53,401 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay 
for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:45:53,401 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:45:53,401 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T06:45:53,403 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:45:53,405 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T06:45:53,406 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=783303, jitterRate=-0.003978908061981201}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T06:45:53,407 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731566753385Initializing all the Stores at 1731566753386 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566753386Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566753387 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566753387Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566753387Cleaning up temporary data from old regions at 1731566753401 (+14 ms)Region opened successfully at 1731566753407 (+6 ms) 2024-11-14T06:45:53,408 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T06:45:53,412 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41f2bdfa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=20680646cf8a/172.17.0.2:0 2024-11-14T06:45:53,413 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T06:45:53,413 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T06:45:53,413 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T06:45:53,413 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T06:45:53,414 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T06:45:53,414 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T06:45:53,414 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T06:45:53,417 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T06:45:53,418 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T06:45:53,419 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T06:45:53,419 INFO [master/20680646cf8a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T06:45:53,420 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T06:45:53,420 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T06:45:53,421 INFO [master/20680646cf8a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T06:45:53,422 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T06:45:53,423 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T06:45:53,424 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Unable to get 
data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T06:45:53,424 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T06:45:53,427 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T06:45:53,428 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T06:45:53,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T06:45:53,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1003cfb32220001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T06:45:53,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:53,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1003cfb32220001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:53,429 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=20680646cf8a,38495,1731566753222, sessionid=0x1003cfb32220000, setting cluster-up flag (Was=false) 2024-11-14T06:45:53,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:53,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1003cfb32220001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:53,434 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T06:45:53,435 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=20680646cf8a,38495,1731566753222 2024-11-14T06:45:53,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1003cfb32220001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:53,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:53,442 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, 
/hbase/online-snapshot/abort 2024-11-14T06:45:53,443 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=20680646cf8a,38495,1731566753222 2024-11-14T06:45:53,444 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T06:45:53,446 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T06:45:53,446 INFO [master/20680646cf8a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T06:45:53,446 INFO [master/20680646cf8a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T06:45:53,446 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 20680646cf8a,38495,1731566753222 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T06:45:53,448 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:45:53,448 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:45:53,448 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:45:53,448 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:45:53,448 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/20680646cf8a:0, corePoolSize=10, maxPoolSize=10 2024-11-14T06:45:53,448 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:53,449 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/20680646cf8a:0, corePoolSize=2, maxPoolSize=2 2024-11-14T06:45:53,449 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] 
executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:53,451 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T06:45:53,451 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T06:45:53,452 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:53,452 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T06:45:53,457 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731566783457 2024-11-14T06:45:53,457 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T06:45:53,457 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T06:45:53,457 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T06:45:53,457 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T06:45:53,457 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T06:45:53,457 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs 
cleaner threads 2024-11-14T06:45:53,458 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:53,458 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T06:45:53,458 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T06:45:53,459 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T06:45:53,461 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T06:45:53,461 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T06:45:53,463 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.large.0-1731566753461,5,FailOnTimeoutGroup] 2024-11-14T06:45:53,465 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.small.0-1731566753463,5,FailOnTimeoutGroup] 2024-11-14T06:45:53,465 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:53,465 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T06:45:53,465 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:53,465 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
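
The lines above show the master enabling several ScheduledChore instances with fixed periods (LogsCleaner and HFileCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms, SnapshotCleaner every 1800000 ms). As an illustrative sketch only, the same fixed-period pattern can be expressed with the plain JDK scheduler; this is not HBase's ChoreService code, and the class and task names below are made up for the example.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
        public static void main(String[] args) throws Exception {
            ScheduledExecutorService chorePool = Executors.newSingleThreadScheduledExecutor();

            // Stand-in for the LogsCleaner chore: period=600000 ms, as reported in the log above.
            Runnable logsCleaner =
                () -> System.out.println("LogsCleaner tick: scan oldWALs and delete expired files");
            chorePool.scheduleAtFixedRate(logsCleaner, 0, 600_000, TimeUnit.MILLISECONDS);

            // Let the first tick run, then stop; a real chore service runs for the process lifetime.
            Thread.sleep(1_000);
            chorePool.shutdown();
        }
    }
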
2024-11-14T06:45:53,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43453 is added to blk_1073741831_1007 (size=1321) 2024-11-14T06:45:53,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36149 is added to blk_1073741831_1007 (size=1321) 2024-11-14T06:45:53,472 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T06:45:53,472 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9 2024-11-14T06:45:53,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36149 is added to blk_1073741832_1008 (size=32) 2024-11-14T06:45:53,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43453 is added to blk_1073741832_1008 (size=32) 2024-11-14T06:45:53,481 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:45:53,485 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T06:45:53,487 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T06:45:53,487 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:53,488 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:45:53,488 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T06:45:53,490 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T06:45:53,490 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:53,490 INFO [RS:0;20680646cf8a:39105 {}] regionserver.HRegionServer(746): ClusterId : 03b2ca6a-61ff-4686-b751-cc5729d8fbfc 2024-11-14T06:45:53,490 DEBUG [RS:0;20680646cf8a:39105 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T06:45:53,490 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:45:53,491 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T06:45:53,492 DEBUG [RS:0;20680646cf8a:39105 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T06:45:53,492 DEBUG [RS:0;20680646cf8a:39105 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T06:45:53,492 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 
MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T06:45:53,492 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:53,493 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:45:53,493 DEBUG [RS:0;20680646cf8a:39105 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T06:45:53,493 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T06:45:53,494 DEBUG [RS:0;20680646cf8a:39105 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b567beb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=20680646cf8a/172.17.0.2:0 2024-11-14T06:45:53,495 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T06:45:53,496 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:53,496 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:45:53,496 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T06:45:53,497 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/hbase/meta/1588230740 2024-11-14T06:45:53,498 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/hbase/meta/1588230740 2024-11-14T06:45:53,499 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T06:45:53,499 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T06:45:53,500 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T06:45:53,501 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T06:45:53,503 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T06:45:53,504 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=814628, jitterRate=0.03585386276245117}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T06:45:53,505 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731566753481Initializing all the Stores at 1731566753482 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566753482Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566753485 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566753485Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566753485Cleaning up temporary data from old regions at 1731566753499 (+14 ms)Region opened successfully at 1731566753505 (+6 ms) 2024-11-14T06:45:53,505 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T06:45:53,505 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T06:45:53,505 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on 
hbase:meta,,1.1588230740 2024-11-14T06:45:53,505 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T06:45:53,505 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T06:45:53,505 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T06:45:53,506 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731566753505Disabling compacts and flushes for region at 1731566753505Disabling writes for close at 1731566753505Writing region close event to WAL at 1731566753505Closed at 1731566753505 2024-11-14T06:45:53,506 DEBUG [RS:0;20680646cf8a:39105 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;20680646cf8a:39105 2024-11-14T06:45:53,506 INFO [RS:0;20680646cf8a:39105 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T06:45:53,506 INFO [RS:0;20680646cf8a:39105 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T06:45:53,506 DEBUG [RS:0;20680646cf8a:39105 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-14T06:45:53,507 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T06:45:53,507 INFO [RS:0;20680646cf8a:39105 {}] regionserver.HRegionServer(2659): reportForDuty to master=20680646cf8a,38495,1731566753222 with port=39105, startcode=1731566753272 2024-11-14T06:45:53,507 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T06:45:53,507 DEBUG [RS:0;20680646cf8a:39105 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T06:45:53,507 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T06:45:53,509 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T06:45:53,510 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44445, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T06:45:53,510 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T06:45:53,510 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38495 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 20680646cf8a,39105,1731566753272 2024-11-14T06:45:53,511 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38495 {}] master.ServerManager(517): Registering regionserver=20680646cf8a,39105,1731566753272 2024-11-14T06:45:53,512 DEBUG [RS:0;20680646cf8a:39105 {}] regionserver.HRegionServer(1440): Config from master: 
hbase.rootdir=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9 2024-11-14T06:45:53,512 DEBUG [RS:0;20680646cf8a:39105 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33995 2024-11-14T06:45:53,512 DEBUG [RS:0;20680646cf8a:39105 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T06:45:53,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T06:45:53,514 DEBUG [RS:0;20680646cf8a:39105 {}] zookeeper.ZKUtil(111): regionserver:39105-0x1003cfb32220001, quorum=127.0.0.1:50335, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/20680646cf8a,39105,1731566753272 2024-11-14T06:45:53,515 WARN [RS:0;20680646cf8a:39105 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T06:45:53,515 INFO [RS:0;20680646cf8a:39105 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T06:45:53,515 DEBUG [RS:0;20680646cf8a:39105 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272 2024-11-14T06:45:53,515 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [20680646cf8a,39105,1731566753272] 2024-11-14T06:45:53,520 INFO [RS:0;20680646cf8a:39105 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T06:45:53,523 INFO [RS:0;20680646cf8a:39105 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T06:45:53,523 INFO [RS:0;20680646cf8a:39105 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T06:45:53,523 INFO [RS:0;20680646cf8a:39105 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:53,524 INFO [RS:0;20680646cf8a:39105 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T06:45:53,524 INFO [RS:0;20680646cf8a:39105 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T06:45:53,525 INFO [RS:0;20680646cf8a:39105 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-14T06:45:53,525 DEBUG [RS:0;20680646cf8a:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:53,525 DEBUG [RS:0;20680646cf8a:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:53,525 DEBUG [RS:0;20680646cf8a:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:53,525 DEBUG [RS:0;20680646cf8a:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:53,525 DEBUG [RS:0;20680646cf8a:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:53,525 DEBUG [RS:0;20680646cf8a:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/20680646cf8a:0, corePoolSize=2, maxPoolSize=2 2024-11-14T06:45:53,525 DEBUG [RS:0;20680646cf8a:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:53,525 DEBUG [RS:0;20680646cf8a:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:53,525 DEBUG [RS:0;20680646cf8a:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:53,525 DEBUG [RS:0;20680646cf8a:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:53,525 DEBUG [RS:0;20680646cf8a:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:53,525 DEBUG [RS:0;20680646cf8a:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:53,526 DEBUG [RS:0;20680646cf8a:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/20680646cf8a:0, corePoolSize=3, maxPoolSize=3 2024-11-14T06:45:53,526 DEBUG [RS:0;20680646cf8a:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0, corePoolSize=3, maxPoolSize=3 2024-11-14T06:45:53,527 INFO [RS:0;20680646cf8a:39105 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:53,527 INFO [RS:0;20680646cf8a:39105 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:53,527 INFO [RS:0;20680646cf8a:39105 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:53,527 INFO [RS:0;20680646cf8a:39105 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
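
The region server lines above start a set of bounded executor services, each reported with a corePoolSize and maxPoolSize. As a rough illustration of what such a bound means, a JDK ThreadPoolExecutor configured the same way looks like this; the keep-alive value is an assumption, not something the log states, and this is not the HBase ExecutorService wrapper itself.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class ExecutorPoolSketch {
        public static void main(String[] args) throws Exception {
            // Bounded pool comparable to "RS_OPEN_REGION ... corePoolSize=1, maxPoolSize=1" above.
            ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
                    1,                          // corePoolSize, as logged
                    1,                          // maxPoolSize, as logged
                    60L, TimeUnit.SECONDS,      // idle keep-alive (assumed, not in the log)
                    new LinkedBlockingQueue<>());

            openRegionPool.submit(() -> System.out.println("open-region task would run here"));

            openRegionPool.shutdown();
            openRegionPool.awaitTermination(5, TimeUnit.SECONDS);
        }
    }
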
2024-11-14T06:45:53,527 INFO [RS:0;20680646cf8a:39105 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:53,527 INFO [RS:0;20680646cf8a:39105 {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,39105,1731566753272-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T06:45:53,541 INFO [RS:0;20680646cf8a:39105 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T06:45:53,541 INFO [RS:0;20680646cf8a:39105 {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,39105,1731566753272-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:53,541 INFO [RS:0;20680646cf8a:39105 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:53,542 INFO [RS:0;20680646cf8a:39105 {}] regionserver.Replication(171): 20680646cf8a,39105,1731566753272 started 2024-11-14T06:45:53,556 INFO [RS:0;20680646cf8a:39105 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:53,556 INFO [RS:0;20680646cf8a:39105 {}] regionserver.HRegionServer(1482): Serving as 20680646cf8a,39105,1731566753272, RpcServer on 20680646cf8a/172.17.0.2:39105, sessionid=0x1003cfb32220001 2024-11-14T06:45:53,556 DEBUG [RS:0;20680646cf8a:39105 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T06:45:53,556 DEBUG [RS:0;20680646cf8a:39105 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 20680646cf8a,39105,1731566753272 2024-11-14T06:45:53,556 DEBUG [RS:0;20680646cf8a:39105 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '20680646cf8a,39105,1731566753272' 2024-11-14T06:45:53,556 DEBUG [RS:0;20680646cf8a:39105 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T06:45:53,557 DEBUG [RS:0;20680646cf8a:39105 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T06:45:53,557 DEBUG [RS:0;20680646cf8a:39105 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T06:45:53,558 DEBUG [RS:0;20680646cf8a:39105 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T06:45:53,558 DEBUG [RS:0;20680646cf8a:39105 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 20680646cf8a,39105,1731566753272 2024-11-14T06:45:53,558 DEBUG [RS:0;20680646cf8a:39105 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '20680646cf8a,39105,1731566753272' 2024-11-14T06:45:53,558 DEBUG [RS:0;20680646cf8a:39105 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T06:45:53,558 DEBUG [RS:0;20680646cf8a:39105 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T06:45:53,558 DEBUG [RS:0;20680646cf8a:39105 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T06:45:53,558 INFO [RS:0;20680646cf8a:39105 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T06:45:53,559 INFO [RS:0;20680646cf8a:39105 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-14T06:45:53,661 WARN [20680646cf8a:38495 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-14T06:45:53,663 INFO [RS:0;20680646cf8a:39105 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=20680646cf8a%2C39105%2C1731566753272, suffix=, logDir=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272, archiveDir=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/oldWALs, maxLogs=32 2024-11-14T06:45:53,665 INFO [RS:0;20680646cf8a:39105 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C39105%2C1731566753272.1731566753665 2024-11-14T06:45:53,673 INFO [RS:0;20680646cf8a:39105 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566753665 2024-11-14T06:45:53,675 DEBUG [RS:0;20680646cf8a:39105 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46257:46257),(127.0.0.1/127.0.0.1:46603:46603)] 2024-11-14T06:45:53,911 DEBUG [20680646cf8a:38495 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T06:45:53,912 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=20680646cf8a,39105,1731566753272 2024-11-14T06:45:53,916 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 20680646cf8a,39105,1731566753272, state=OPENING 2024-11-14T06:45:53,918 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T06:45:53,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:53,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1003cfb32220001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:45:53,922 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T06:45:53,922 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:45:53,922 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:45:53,922 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=20680646cf8a,39105,1731566753272}] 2024-11-14T06:45:54,077 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T06:45:54,082 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54355, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T06:45:54,089 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T06:45:54,089 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T06:45:54,092 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=20680646cf8a%2C39105%2C1731566753272.meta, suffix=.meta, logDir=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272, archiveDir=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/oldWALs, maxLogs=32 2024-11-14T06:45:54,094 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta 2024-11-14T06:45:54,101 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta 2024-11-14T06:45:54,105 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46257:46257),(127.0.0.1/127.0.0.1:46603:46603)] 2024-11-14T06:45:54,106 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T06:45:54,106 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T06:45:54,106 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T06:45:54,106 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
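
Several lines in this excerpt probe or watch znodes under /hbase (the earlier "node does not exist (not necessarily an error)" checks, and the /hbase/meta-region-server updates around the meta assignment here). A minimal sketch of such a probe with the plain ZooKeeper client follows; the quorum address and znode path are copied from the log, and the rest is a simplified illustration rather than HBase's RecoverableZooKeeper code.

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooKeeper;

    public class ZnodeProbeSketch {
        public static void main(String[] args) throws Exception {
            // Quorum 127.0.0.1:50335 as printed in the log; watcher callback left empty.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:50335", 30_000, event -> { });
            try {
                byte[] data = zk.getData("/hbase/meta-region-server", false, null);
                System.out.println("meta location znode has " + data.length + " bytes");
            } catch (KeeperException.NoNodeException e) {
                // Absent znode is expected before the meta region is assigned.
                System.out.println("znode not present yet (not necessarily an error)");
            } finally {
                zk.close();
            }
        }
    }
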
2024-11-14T06:45:54,107 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T06:45:54,107 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:45:54,107 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T06:45:54,107 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T06:45:54,108 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T06:45:54,109 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T06:45:54,110 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:54,110 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:45:54,110 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T06:45:54,111 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T06:45:54,111 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:54,112 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:45:54,112 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T06:45:54,112 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T06:45:54,113 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:54,113 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:45:54,113 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T06:45:54,114 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T06:45:54,114 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:54,114 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
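
The compaction configuration printed for each column family above includes ratio 1.200000 and off-peak ratio 5.000000. Those ratios feed the usual HBase selection test, sketched below in simplified form: a file stays in a compaction candidate set only if it is not more than ratio times the combined size of the other files. This is a paraphrase for illustration, not the actual ExploringCompactionPolicy source.

    import java.util.List;

    public class CompactionRatioSketch {
        static boolean selectionSatisfiesRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > ratio * (total - size)) {
                    return false;   // this file dwarfs the rest of the selection
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // 128 MB, 100 MB, 90 MB pass at ratio 1.2; adding a 1 GB file would fail the test.
            System.out.println(selectionSatisfiesRatio(
                    List.of(128L << 20, 100L << 20, 90L << 20), 1.2));
        }
    }
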
2024-11-14T06:45:54,115 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T06:45:54,115 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/hbase/meta/1588230740 2024-11-14T06:45:54,117 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/hbase/meta/1588230740 2024-11-14T06:45:54,118 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T06:45:54,118 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T06:45:54,119 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T06:45:54,120 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T06:45:54,121 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=705171, jitterRate=-0.10332879424095154}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T06:45:54,121 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T06:45:54,122 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731566754107Writing region info on filesystem at 1731566754107Initializing all the Stores at 1731566754108 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566754108Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566754108Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566754108Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566754108Cleaning up temporary data from old regions at 1731566754118 (+10 ms)Running coprocessor post-open hooks at 1731566754121 (+3 ms)Region opened successfully at 1731566754122 (+1 ms) 2024-11-14T06:45:54,123 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731566754077 2024-11-14T06:45:54,126 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T06:45:54,126 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T06:45:54,147 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=20680646cf8a,39105,1731566753272 2024-11-14T06:45:54,149 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 20680646cf8a,39105,1731566753272, state=OPEN 2024-11-14T06:45:54,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T06:45:54,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1003cfb32220001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T06:45:54,151 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=20680646cf8a,39105,1731566753272 2024-11-14T06:45:54,151 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:45:54,151 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:45:54,155 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T06:45:54,155 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=20680646cf8a,39105,1731566753272 in 229 msec 2024-11-14T06:45:54,159 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T06:45:54,159 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 648 msec 2024-11-14T06:45:54,160 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T06:45:54,161 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T06:45:54,162 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T06:45:54,162 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=20680646cf8a,39105,1731566753272, seqNum=-1] 2024-11-14T06:45:54,163 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T06:45:54,164 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51193, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T06:45:54,172 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 725 msec 2024-11-14T06:45:54,172 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731566754172, completionTime=-1 2024-11-14T06:45:54,173 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T06:45:54,173 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-14T06:45:54,175 INFO [master/20680646cf8a:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-14T06:45:54,176 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731566814176 2024-11-14T06:45:54,176 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731566874176 2024-11-14T06:45:54,176 INFO [master/20680646cf8a:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 3 msec 2024-11-14T06:45:54,176 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,38495,1731566753222-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:54,176 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,38495,1731566753222-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:54,176 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,38495,1731566753222-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:54,176 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-20680646cf8a:38495, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T06:45:54,177 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:54,177 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:54,180 DEBUG [master/20680646cf8a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T06:45:54,183 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.879sec 2024-11-14T06:45:54,183 INFO [master/20680646cf8a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T06:45:54,183 INFO [master/20680646cf8a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T06:45:54,183 INFO [master/20680646cf8a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T06:45:54,183 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-14T06:45:54,183 INFO [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T06:45:54,183 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,38495,1731566753222-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T06:45:54,184 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,38495,1731566753222-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T06:45:54,191 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47d1c4f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T06:45:54,191 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 20680646cf8a,38495,-1 for getting cluster id 2024-11-14T06:45:54,191 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T06:45:54,197 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '03b2ca6a-61ff-4686-b751-cc5729d8fbfc' 2024-11-14T06:45:54,197 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T06:45:54,197 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T06:45:54,197 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T06:45:54,197 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,38495,1731566753222-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T06:45:54,197 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "03b2ca6a-61ff-4686-b751-cc5729d8fbfc" 2024-11-14T06:45:54,198 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@132de1c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T06:45:54,198 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [20680646cf8a,38495,-1] 2024-11-14T06:45:54,198 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T06:45:54,199 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:45:54,200 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45136, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T06:45:54,201 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@597807df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T06:45:54,202 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T06:45:54,203 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=20680646cf8a,39105,1731566753272, seqNum=-1] 2024-11-14T06:45:54,204 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T06:45:54,207 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58436, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T06:45:54,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=20680646cf8a,38495,1731566753222 2024-11-14T06:45:54,210 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:45:54,213 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T06:45:54,232 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/20680646cf8a:0 server-side Connection retries=45 2024-11-14T06:45:54,232 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:45:54,232 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T06:45:54,232 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T06:45:54,232 
INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:45:54,232 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T06:45:54,233 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T06:45:54,233 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T06:45:54,237 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37019 2024-11-14T06:45:54,240 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37019 connecting to ZooKeeper ensemble=127.0.0.1:50335 2024-11-14T06:45:54,241 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:45:54,243 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:45:54,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:370190x0, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T06:45:54,249 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:370190x0, quorum=127.0.0.1:50335, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-14T06:45:54,249 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-14T06:45:54,250 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37019-0x1003cfb32220002 connected 2024-11-14T06:45:54,250 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T06:45:54,257 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T06:45:54,258 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:37019-0x1003cfb32220002, quorum=127.0.0.1:50335, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T06:45:54,260 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37019-0x1003cfb32220002, quorum=127.0.0.1:50335, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T06:45:54,261 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37019 2024-11-14T06:45:54,266 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37019 2024-11-14T06:45:54,267 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37019 2024-11-14T06:45:54,270 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37019 2024-11-14T06:45:54,271 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37019 2024-11-14T06:45:54,272 INFO [RS:1;20680646cf8a:37019 {}] regionserver.HRegionServer(746): ClusterId : 03b2ca6a-61ff-4686-b751-cc5729d8fbfc 2024-11-14T06:45:54,272 DEBUG [RS:1;20680646cf8a:37019 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T06:45:54,273 DEBUG [RS:1;20680646cf8a:37019 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T06:45:54,273 DEBUG [RS:1;20680646cf8a:37019 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T06:45:54,275 DEBUG [RS:1;20680646cf8a:37019 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T06:45:54,275 DEBUG [RS:1;20680646cf8a:37019 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4aedb955, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=20680646cf8a/172.17.0.2:0 2024-11-14T06:45:54,289 DEBUG [RS:1;20680646cf8a:37019 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;20680646cf8a:37019 2024-11-14T06:45:54,289 INFO [RS:1;20680646cf8a:37019 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T06:45:54,289 INFO [RS:1;20680646cf8a:37019 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T06:45:54,289 DEBUG [RS:1;20680646cf8a:37019 {}] regionserver.HRegionServer(832): About to register with Master. 
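The ipc.RpcExecutor entries above record the region server's call queues being created and started: one LinkedBlockingQueue per executor with maxQueueLength=30 and a fixed number of handler threads (handlerCount=3 for default.FPBQ.Fifo). As an illustration only, here is a minimal JDK-only sketch of that pattern; it is not HBase's RpcExecutor, and the class name is hypothetical.

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    // Hypothetical sketch: a fixed pool of handler threads draining one bounded
    // FIFO call queue, mirroring the "numCallQueues=1, maxQueueLength=30,
    // handlerCount=3" values logged above. Not HBase code.
    public final class FifoHandlerPoolSketch {
        public static void main(String[] args) throws InterruptedException {
            BlockingQueue<Runnable> callQueue = new LinkedBlockingQueue<>(30); // maxQueueLength=30
            int handlerCount = 3;
            for (int i = 0; i < handlerCount; i++) {
                Thread handler = new Thread(() -> {
                    try {
                        while (true) {
                            callQueue.take().run(); // FIFO dispatch of queued calls
                        }
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    }
                }, "handler=" + i);
                handler.setDaemon(true);
                handler.start();
            }
            callQueue.offer(() -> System.out.println("handled one queued call"));
            Thread.sleep(200); // give a handler time to drain the queue before exit
        }
    }

With a bounded queue, offer() returns false once the queue is full rather than blocking; how the real server reacts to a full call queue is configured separately and is not shown here.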
2024-11-14T06:45:54,290 INFO [RS:1;20680646cf8a:37019 {}] regionserver.HRegionServer(2659): reportForDuty to master=20680646cf8a,38495,1731566753222 with port=37019, startcode=1731566754231 2024-11-14T06:45:54,290 DEBUG [RS:1;20680646cf8a:37019 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T06:45:54,292 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54585, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T06:45:54,292 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38495 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 20680646cf8a,37019,1731566754231 2024-11-14T06:45:54,292 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38495 {}] master.ServerManager(517): Registering regionserver=20680646cf8a,37019,1731566754231 2024-11-14T06:45:54,294 DEBUG [RS:1;20680646cf8a:37019 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9 2024-11-14T06:45:54,294 DEBUG [RS:1;20680646cf8a:37019 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33995 2024-11-14T06:45:54,294 DEBUG [RS:1;20680646cf8a:37019 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T06:45:54,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T06:45:54,295 DEBUG [RS:1;20680646cf8a:37019 {}] zookeeper.ZKUtil(111): regionserver:37019-0x1003cfb32220002, quorum=127.0.0.1:50335, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/20680646cf8a,37019,1731566754231 2024-11-14T06:45:54,295 WARN [RS:1;20680646cf8a:37019 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T06:45:54,295 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [20680646cf8a,37019,1731566754231] 2024-11-14T06:45:54,295 INFO [RS:1;20680646cf8a:37019 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T06:45:54,296 DEBUG [RS:1;20680646cf8a:37019 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231 2024-11-14T06:45:54,299 INFO [RS:1;20680646cf8a:37019 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T06:45:54,301 INFO [RS:1;20680646cf8a:37019 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T06:45:54,302 INFO [RS:1;20680646cf8a:37019 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T06:45:54,302 INFO [RS:1;20680646cf8a:37019 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-14T06:45:54,302 INFO [RS:1;20680646cf8a:37019 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T06:45:54,303 INFO [RS:1;20680646cf8a:37019 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T06:45:54,303 INFO [RS:1;20680646cf8a:37019 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:54,303 DEBUG [RS:1;20680646cf8a:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:54,303 DEBUG [RS:1;20680646cf8a:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:54,303 DEBUG [RS:1;20680646cf8a:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:54,303 DEBUG [RS:1;20680646cf8a:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:54,303 DEBUG [RS:1;20680646cf8a:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:54,304 DEBUG [RS:1;20680646cf8a:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/20680646cf8a:0, corePoolSize=2, maxPoolSize=2 2024-11-14T06:45:54,304 DEBUG [RS:1;20680646cf8a:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:54,304 DEBUG [RS:1;20680646cf8a:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:54,304 DEBUG [RS:1;20680646cf8a:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:54,304 DEBUG [RS:1;20680646cf8a:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:54,304 DEBUG [RS:1;20680646cf8a:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:54,304 DEBUG [RS:1;20680646cf8a:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:45:54,304 DEBUG [RS:1;20680646cf8a:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/20680646cf8a:0, corePoolSize=3, maxPoolSize=3 2024-11-14T06:45:54,304 DEBUG [RS:1;20680646cf8a:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0, corePoolSize=3, maxPoolSize=3 2024-11-14T06:45:54,304 INFO [RS:1;20680646cf8a:37019 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
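The ChoreService entries above each enable a named periodic task with an explicit period and time unit (for example CompactionChecker with period=1000, unit=MILLISECONDS). Purely as an illustration, a minimal JDK-only sketch of running a named task at a fixed period follows; it is not HBase's ChoreService, and the class name is hypothetical.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    // Hypothetical sketch: schedule one task at a fixed period, echoing the
    // "period=1000, unit=MILLISECONDS" style reported by the ChoreService
    // entries above. Not the HBase ChoreService implementation.
    public final class PeriodicChoreSketch {
        public static void main(String[] args) throws InterruptedException {
            ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
            long period = 1000L;                   // from the CompactionChecker entry above
            TimeUnit unit = TimeUnit.MILLISECONDS; // from the same entry
            pool.scheduleAtFixedRate(
                () -> System.out.println("chore tick"), period, period, unit);
            Thread.sleep(3_000); // let a few ticks fire, then shut the pool down
            pool.shutdownNow();
        }
    }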
2024-11-14T06:45:54,304 INFO [RS:1;20680646cf8a:37019 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:54,305 INFO [RS:1;20680646cf8a:37019 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:54,305 INFO [RS:1;20680646cf8a:37019 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:54,305 INFO [RS:1;20680646cf8a:37019 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:54,305 INFO [RS:1;20680646cf8a:37019 {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,37019,1731566754231-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T06:45:54,325 INFO [RS:1;20680646cf8a:37019 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T06:45:54,325 INFO [RS:1;20680646cf8a:37019 {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,37019,1731566754231-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:54,325 INFO [RS:1;20680646cf8a:37019 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:54,325 INFO [RS:1;20680646cf8a:37019 {}] regionserver.Replication(171): 20680646cf8a,37019,1731566754231 started 2024-11-14T06:45:54,341 INFO [RS:1;20680646cf8a:37019 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:45:54,341 INFO [RS:1;20680646cf8a:37019 {}] regionserver.HRegionServer(1482): Serving as 20680646cf8a,37019,1731566754231, RpcServer on 20680646cf8a/172.17.0.2:37019, sessionid=0x1003cfb32220002 2024-11-14T06:45:54,341 DEBUG [RS:1;20680646cf8a:37019 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T06:45:54,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;20680646cf8a:37019,5,FailOnTimeoutGroup] 2024-11-14T06:45:54,341 DEBUG [RS:1;20680646cf8a:37019 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 20680646cf8a,37019,1731566754231 2024-11-14T06:45:54,341 DEBUG [RS:1;20680646cf8a:37019 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '20680646cf8a,37019,1731566754231' 2024-11-14T06:45:54,342 DEBUG [RS:1;20680646cf8a:37019 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T06:45:54,342 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-14T06:45:54,342 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-14T06:45:54,342 DEBUG [RS:1;20680646cf8a:37019 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T06:45:54,343 DEBUG [RS:1;20680646cf8a:37019 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T06:45:54,343 DEBUG [RS:1;20680646cf8a:37019 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T06:45:54,343 DEBUG [RS:1;20680646cf8a:37019 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
20680646cf8a,37019,1731566754231 2024-11-14T06:45:54,343 DEBUG [RS:1;20680646cf8a:37019 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '20680646cf8a,37019,1731566754231' 2024-11-14T06:45:54,343 DEBUG [RS:1;20680646cf8a:37019 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T06:45:54,343 DEBUG [RS:1;20680646cf8a:37019 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T06:45:54,343 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 20680646cf8a,38495,1731566753222 2024-11-14T06:45:54,343 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@63a82de5 2024-11-14T06:45:54,344 DEBUG [RS:1;20680646cf8a:37019 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T06:45:54,344 INFO [RS:1;20680646cf8a:37019 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T06:45:54,344 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-14T06:45:54,344 INFO [RS:1;20680646cf8a:37019 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T06:45:54,345 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45144, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-14T06:45:54,346 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38495 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-14T06:45:54,346 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38495 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
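The two TableDescriptorChecker warnings above fire because the table is being created with deliberately tiny values for "hbase.hregion.max.filesize" (786432) and "hbase.hregion.memstore.flush.size" (8192), presumably so the test can force frequent flushes and log rolls. As a hypothetical illustration only, and not the test's actual setup code, setting those two properties on a Hadoop Configuration looks like this:

    import org.apache.hadoop.conf.Configuration;

    // Hypothetical sketch: set the two properties named in the warnings above
    // to the small values the log reports. Not the test's actual code.
    public final class SmallRegionConfigSketch {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            conf.setLong("hbase.hregion.max.filesize", 786432L);      // value flagged by the MAX_FILESIZE warning
            conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // value flagged by the MEMSTORE_FLUSHSIZE warning
            System.out.println(conf.getLong("hbase.hregion.max.filesize", -1L));
        }
    }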
2024-11-14T06:45:54,346 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38495 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T06:45:54,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38495 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-14T06:45:54,349 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-14T06:45:54,349 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:54,349 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38495 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-14T06:45:54,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38495 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T06:45:54,350 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-14T06:45:54,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36149 is added to blk_1073741835_1011 (size=393) 2024-11-14T06:45:54,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43453 is added to blk_1073741835_1011 (size=393) 2024-11-14T06:45:54,360 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4c50aa91d588b8b257a9bbc495433f09, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9 2024-11-14T06:45:54,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36149 is added to blk_1073741836_1012 (size=76) 2024-11-14T06:45:54,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43453 is added to blk_1073741836_1012 (size=76) 2024-11-14T06:45:54,367 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:45:54,367 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 4c50aa91d588b8b257a9bbc495433f09, disabling compactions & flushes 2024-11-14T06:45:54,367 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09. 2024-11-14T06:45:54,367 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09. 2024-11-14T06:45:54,368 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09. after waiting 0 ms 2024-11-14T06:45:54,368 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09. 2024-11-14T06:45:54,368 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09. 2024-11-14T06:45:54,368 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4c50aa91d588b8b257a9bbc495433f09: Waiting for close lock at 1731566754367Disabling compacts and flushes for region at 1731566754367Disabling writes for close at 1731566754368 (+1 ms)Writing region close event to WAL at 1731566754368Closed at 1731566754368 2024-11-14T06:45:54,369 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-14T06:45:54,369 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731566754369"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731566754369"}]},"ts":"1731566754369"} 2024-11-14T06:45:54,372 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-14T06:45:54,373 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-14T06:45:54,373 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731566754373"}]},"ts":"1731566754373"} 2024-11-14T06:45:54,375 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-14T06:45:54,376 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=4c50aa91d588b8b257a9bbc495433f09, ASSIGN}] 2024-11-14T06:45:54,378 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=4c50aa91d588b8b257a9bbc495433f09, ASSIGN 2024-11-14T06:45:54,379 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=4c50aa91d588b8b257a9bbc495433f09, ASSIGN; state=OFFLINE, location=20680646cf8a,39105,1731566753272; forceNewPlan=false, retain=false 2024-11-14T06:45:54,448 INFO [RS:1;20680646cf8a:37019 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=20680646cf8a%2C37019%2C1731566754231, suffix=, logDir=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231, archiveDir=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/oldWALs, maxLogs=32 2024-11-14T06:45:54,450 INFO [RS:1;20680646cf8a:37019 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C37019%2C1731566754231.1731566754449 2024-11-14T06:45:54,458 INFO [RS:1;20680646cf8a:37019 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 2024-11-14T06:45:54,458 DEBUG [RS:1;20680646cf8a:37019 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46603:46603),(127.0.0.1/127.0.0.1:46257:46257)] 2024-11-14T06:45:54,528 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-14T06:45:54,530 INFO [20680646cf8a:38495 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-14T06:45:54,531 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4c50aa91d588b8b257a9bbc495433f09, regionState=OPENING, regionLocation=20680646cf8a,39105,1731566753272 2024-11-14T06:45:54,534 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=4c50aa91d588b8b257a9bbc495433f09, ASSIGN because future has completed 2024-11-14T06:45:54,535 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4c50aa91d588b8b257a9bbc495433f09, server=20680646cf8a,39105,1731566753272}] 2024-11-14T06:45:54,698 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09. 2024-11-14T06:45:54,699 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 4c50aa91d588b8b257a9bbc495433f09, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09.', STARTKEY => '', ENDKEY => ''} 2024-11-14T06:45:54,699 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 4c50aa91d588b8b257a9bbc495433f09 2024-11-14T06:45:54,699 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:45:54,700 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 4c50aa91d588b8b257a9bbc495433f09 2024-11-14T06:45:54,700 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 4c50aa91d588b8b257a9bbc495433f09 2024-11-14T06:45:54,702 INFO [StoreOpener-4c50aa91d588b8b257a9bbc495433f09-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4c50aa91d588b8b257a9bbc495433f09 2024-11-14T06:45:54,703 INFO [StoreOpener-4c50aa91d588b8b257a9bbc495433f09-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4c50aa91d588b8b257a9bbc495433f09 columnFamilyName info 2024-11-14T06:45:54,703 DEBUG [StoreOpener-4c50aa91d588b8b257a9bbc495433f09-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:45:54,704 INFO [StoreOpener-4c50aa91d588b8b257a9bbc495433f09-1 {}] regionserver.HStore(327): Store=4c50aa91d588b8b257a9bbc495433f09/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:45:54,704 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 4c50aa91d588b8b257a9bbc495433f09 2024-11-14T06:45:54,705 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09 2024-11-14T06:45:54,705 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09 2024-11-14T06:45:54,706 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 4c50aa91d588b8b257a9bbc495433f09 2024-11-14T06:45:54,706 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 4c50aa91d588b8b257a9bbc495433f09 2024-11-14T06:45:54,708 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 4c50aa91d588b8b257a9bbc495433f09 2024-11-14T06:45:54,710 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T06:45:54,711 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 4c50aa91d588b8b257a9bbc495433f09; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=794591, jitterRate=0.010375335812568665}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T06:45:54,711 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4c50aa91d588b8b257a9bbc495433f09 2024-11-14T06:45:54,711 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 4c50aa91d588b8b257a9bbc495433f09: Running coprocessor pre-open hook at 1731566754700Writing region info on filesystem at 1731566754700Initializing all the Stores at 1731566754701 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566754701Cleaning up temporary data from old regions at 1731566754706 (+5 ms)Running coprocessor post-open hooks at 1731566754711 (+5 ms)Region opened successfully at 1731566754711 2024-11-14T06:45:54,713 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09., pid=6, masterSystemTime=1731566754690 2024-11-14T06:45:54,715 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09. 2024-11-14T06:45:54,716 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09. 2024-11-14T06:45:54,716 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4c50aa91d588b8b257a9bbc495433f09, regionState=OPEN, openSeqNum=2, regionLocation=20680646cf8a,39105,1731566753272 2024-11-14T06:45:54,719 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4c50aa91d588b8b257a9bbc495433f09, server=20680646cf8a,39105,1731566753272 because future has completed 2024-11-14T06:45:54,723 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-14T06:45:54,723 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 4c50aa91d588b8b257a9bbc495433f09, server=20680646cf8a,39105,1731566753272 in 186 msec 2024-11-14T06:45:54,726 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-14T06:45:54,727 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=4c50aa91d588b8b257a9bbc495433f09, ASSIGN in 347 msec 2024-11-14T06:45:54,728 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-14T06:45:54,728 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731566754728"}]},"ts":"1731566754728"} 2024-11-14T06:45:54,730 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-14T06:45:54,732 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-14T06:45:54,736 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 386 msec 2024-11-14T06:45:54,914 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:45:54,920 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:45:55,439 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T06:45:55,441 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:45:55,466 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:45:55,467 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:45:55,468 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:45:59,521 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T06:45:59,523 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-14T06:46:04,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38495 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T06:46:04,425 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-14T06:46:04,425 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-14T06:46:04,428 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-14T06:46:04,428 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09. 2024-11-14T06:46:04,439 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:46:04,442 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:46:04,443 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:46:04,443 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:46:04,443 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T06:46:04,444 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b53e4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:46:04,444 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@402062d6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:46:04,528 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-14T06:46:04,528 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-14T06:46:04,539 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7a2a3a4f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/java.io.tmpdir/jetty-localhost-37265-hadoop-hdfs-3_4_1-tests_jar-_-any-17528828034047120965/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:46:04,540 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7fa27241{HTTP/1.1, (http/1.1)}{localhost:37265} 2024-11-14T06:46:04,540 INFO [Time-limited test {}] server.Server(415): Started @116136ms 2024-11-14T06:46:04,541 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T06:46:04,570 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:46:04,574 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:46:04,575 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:46:04,575 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:46:04,575 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T06:46:04,575 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ca4b7c9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:46:04,575 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c5e4864{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:46:04,598 WARN [Thread-828 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data5/current/BP-1678146812-172.17.0.2-1731566752608/current, will proceed with Du for space computation calculation, 2024-11-14T06:46:04,598 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data6/current/BP-1678146812-172.17.0.2-1731566752608/current, will proceed with Du for space computation calculation, 2024-11-14T06:46:04,613 WARN [Thread-808 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T06:46:04,616 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x54f7a4d27c1386bb with lease ID 0xba885f94c88789a4: Processing first storage report for DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800 from datanode DatanodeRegistration(127.0.0.1:35733, datanodeUuid=92e56133-38bc-451b-a9b6-c6dd482e646f, infoPort=37411, infoSecurePort=0, ipcPort=43239, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608) 2024-11-14T06:46:04,616 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x54f7a4d27c1386bb with lease ID 0xba885f94c88789a4: from storage DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800 node DatanodeRegistration(127.0.0.1:35733, datanodeUuid=92e56133-38bc-451b-a9b6-c6dd482e646f, infoPort=37411, infoSecurePort=0, ipcPort=43239, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:46:04,616 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x54f7a4d27c1386bb with lease ID 0xba885f94c88789a4: Processing first storage report for DS-fd7be22d-9a77-458e-a2d6-d55f2de1c976 from datanode DatanodeRegistration(127.0.0.1:35733, datanodeUuid=92e56133-38bc-451b-a9b6-c6dd482e646f, infoPort=37411, infoSecurePort=0, ipcPort=43239, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608) 2024-11-14T06:46:04,616 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x54f7a4d27c1386bb with lease ID 0xba885f94c88789a4: from storage DS-fd7be22d-9a77-458e-a2d6-d55f2de1c976 node DatanodeRegistration(127.0.0.1:35733, datanodeUuid=92e56133-38bc-451b-a9b6-c6dd482e646f, infoPort=37411, infoSecurePort=0, ipcPort=43239, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:46:04,673 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1d80e9a4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/java.io.tmpdir/jetty-localhost-40449-hadoop-hdfs-3_4_1-tests_jar-_-any-14523387677221031308/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:46:04,673 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2c590722{HTTP/1.1, (http/1.1)}{localhost:40449} 2024-11-14T06:46:04,673 INFO [Time-limited test {}] server.Server(415): Started @116270ms 2024-11-14T06:46:04,674 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T06:46:04,706 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:46:04,710 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:46:04,711 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:46:04,711 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:46:04,711 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T06:46:04,713 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c141b19{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:46:04,714 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25509568{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:46:04,732 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data7/current/BP-1678146812-172.17.0.2-1731566752608/current, will proceed with Du for space computation calculation, 2024-11-14T06:46:04,732 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data8/current/BP-1678146812-172.17.0.2-1731566752608/current, will proceed with Du for space computation calculation, 2024-11-14T06:46:04,747 WARN [Thread-843 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T06:46:04,749 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x59dfbd336fdd8d59 with lease ID 0xba885f94c88789a5: Processing first storage report for DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4 from datanode DatanodeRegistration(127.0.0.1:45879, datanodeUuid=973ca49c-160b-49d4-b8ca-7cf03e483e79, infoPort=42603, infoSecurePort=0, ipcPort=45351, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608) 2024-11-14T06:46:04,749 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x59dfbd336fdd8d59 with lease ID 0xba885f94c88789a5: from storage DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4 node DatanodeRegistration(127.0.0.1:45879, datanodeUuid=973ca49c-160b-49d4-b8ca-7cf03e483e79, infoPort=42603, infoSecurePort=0, ipcPort=45351, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:46:04,749 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x59dfbd336fdd8d59 with lease ID 0xba885f94c88789a5: Processing first storage report for DS-6c5c2c26-0995-4f13-be8a-0bf64ba7a66e from datanode DatanodeRegistration(127.0.0.1:45879, datanodeUuid=973ca49c-160b-49d4-b8ca-7cf03e483e79, infoPort=42603, infoSecurePort=0, ipcPort=45351, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608) 2024-11-14T06:46:04,749 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x59dfbd336fdd8d59 with lease ID 0xba885f94c88789a5: from storage DS-6c5c2c26-0995-4f13-be8a-0bf64ba7a66e node DatanodeRegistration(127.0.0.1:45879, datanodeUuid=973ca49c-160b-49d4-b8ca-7cf03e483e79, infoPort=42603, infoSecurePort=0, ipcPort=45351, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:46:04,815 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5b3e7853{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/java.io.tmpdir/jetty-localhost-44321-hadoop-hdfs-3_4_1-tests_jar-_-any-12671789123672266917/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:46:04,816 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@65429201{HTTP/1.1, (http/1.1)}{localhost:44321} 2024-11-14T06:46:04,816 INFO [Time-limited test {}] server.Server(415): Started @116412ms 2024-11-14T06:46:04,817 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-14T06:46:04,873 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data9/current/BP-1678146812-172.17.0.2-1731566752608/current, will proceed with Du for space computation calculation, 2024-11-14T06:46:04,873 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data10/current/BP-1678146812-172.17.0.2-1731566752608/current, will proceed with Du for space computation calculation, 2024-11-14T06:46:04,889 WARN [Thread-878 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T06:46:04,891 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb361799aec52e1a4 with lease ID 0xba885f94c88789a6: Processing first storage report for DS-5415e41b-ebdb-4ff1-838b-53415f043d44 from datanode DatanodeRegistration(127.0.0.1:40987, datanodeUuid=d7a86ca6-cdad-42e0-b252-0609960e1a8b, infoPort=45157, infoSecurePort=0, ipcPort=33975, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608) 2024-11-14T06:46:04,891 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb361799aec52e1a4 with lease ID 0xba885f94c88789a6: from storage DS-5415e41b-ebdb-4ff1-838b-53415f043d44 node DatanodeRegistration(127.0.0.1:40987, datanodeUuid=d7a86ca6-cdad-42e0-b252-0609960e1a8b, infoPort=45157, infoSecurePort=0, ipcPort=33975, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:46:04,891 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb361799aec52e1a4 with lease ID 0xba885f94c88789a6: Processing first storage report for DS-1b83e4fb-43c3-4ca6-a074-226364a18007 from datanode DatanodeRegistration(127.0.0.1:40987, datanodeUuid=d7a86ca6-cdad-42e0-b252-0609960e1a8b, infoPort=45157, infoSecurePort=0, ipcPort=33975, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608) 2024-11-14T06:46:04,891 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb361799aec52e1a4 with lease ID 0xba885f94c88789a6: from storage DS-1b83e4fb-43c3-4ca6-a074-226364a18007 node DatanodeRegistration(127.0.0.1:40987, datanodeUuid=d7a86ca6-cdad-42e0-b252-0609960e1a8b, infoPort=45157, infoSecurePort=0, ipcPort=33975, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:46:04,937 WARN [ResponseProcessor for block BP-1678146812-172.17.0.2-1731566752608:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1678146812-172.17.0.2-1731566752608:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1678146812-172.17.0.2-1731566752608:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T06:46:04,937 WARN [ResponseProcessor for block BP-1678146812-172.17.0.2-1731566752608:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1678146812-172.17.0.2-1731566752608:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1678146812-172.17.0.2-1731566752608:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:04,936 WARN [ResponseProcessor for block BP-1678146812-172.17.0.2-1731566752608:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1678146812-172.17.0.2-1731566752608:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:04,936 WARN [ResponseProcessor for block BP-1678146812-172.17.0.2-1731566752608:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1678146812-172.17.0.2-1731566752608:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:04,938 WARN [DataStreamer for file /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566753665 block BP-1678146812-172.17.0.2-1731566752608:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK], DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]) is bad. 2024-11-14T06:46:04,938 WARN [DataStreamer for file /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 block BP-1678146812-172.17.0.2-1731566752608:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK], DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]) is bad. 
2024-11-14T06:46:04,938 WARN [DataStreamer for file /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/WALs/20680646cf8a,38495,1731566753222/20680646cf8a%2C38495%2C1731566753222.1731566753373 block BP-1678146812-172.17.0.2-1731566752608:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK], DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]) is bad. 2024-11-14T06:46:04,938 WARN [DataStreamer for file /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta block BP-1678146812-172.17.0.2-1731566752608:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK], DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]) is bad. 2024-11-14T06:46:04,938 WARN [PacketResponder: BP-1678146812-172.17.0.2-1731566752608:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43453] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:04,938 WARN [PacketResponder: BP-1678146812-172.17.0.2-1731566752608:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43453] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] 
at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:04,941 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:35812 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36149:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35812 dst: /127.0.0.1:36149 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:04,941 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_143996592_22 at /127.0.0.1:40868 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:43453:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40868 dst: /127.0.0.1:43453 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] 
at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:04,941 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1309351602_22 at /127.0.0.1:40800 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43453:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40800 dst: /127.0.0.1:43453 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:04,941 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1309351602_22 at /127.0.0.1:35774 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36149:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35774 dst: /127.0.0.1:36149 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:04,941 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:40828 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43453:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40828 dst: /127.0.0.1:43453 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:04,942 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_143996592_22 at /127.0.0.1:35832 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:36149:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35832 dst: /127.0.0.1:36149 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T06:46:04,943 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@52be898{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:46:04,942 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:40814 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43453:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40814 dst: /127.0.0.1:43453 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:04,941 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:35798 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36149:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35798 dst: /127.0.0.1:36149 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:04,943 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@520d17ab{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:46:04,943 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:46:04,944 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39a69c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:46:04,944 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d3d4ef0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/hadoop.log.dir/,STOPPED} 2024-11-14T06:46:04,945 WARN [BP-1678146812-172.17.0.2-1731566752608 heartbeating to localhost/127.0.0.1:33995 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T06:46:04,945 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T06:46:04,945 WARN [BP-1678146812-172.17.0.2-1731566752608 heartbeating to localhost/127.0.0.1:33995 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1678146812-172.17.0.2-1731566752608 (Datanode Uuid ff1689b7-6834-4e0f-9cff-2acd1fab3999) service to localhost/127.0.0.1:33995 2024-11-14T06:46:04,945 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T06:46:04,945 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data3/current/BP-1678146812-172.17.0.2-1731566752608 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:46:04,946 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data4/current/BP-1678146812-172.17.0.2-1731566752608 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:46:04,946 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T06:46:04,947 WARN [DataStreamer for file /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/WALs/20680646cf8a,38495,1731566753222/20680646cf8a%2C38495%2C1731566753222.1731566753373 block BP-1678146812-172.17.0.2-1731566752608:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:04,947 WARN [DataStreamer for file /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 block BP-1678146812-172.17.0.2-1731566752608:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:04,947 WARN [DataStreamer for file /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta block BP-1678146812-172.17.0.2-1731566752608:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:04,947 WARN [DataStreamer for file /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566753665 block BP-1678146812-172.17.0.2-1731566752608:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:04,948 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f1f9cf1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:46:04,949 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2df55a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:46:04,949 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:46:04,949 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bb4f47b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:46:04,949 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35a03ba9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/hadoop.log.dir/,STOPPED} 2024-11-14T06:46:04,950 WARN [BP-1678146812-172.17.0.2-1731566752608 heartbeating to localhost/127.0.0.1:33995 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T06:46:04,950 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T06:46:04,950 WARN [BP-1678146812-172.17.0.2-1731566752608 heartbeating to localhost/127.0.0.1:33995 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1678146812-172.17.0.2-1731566752608 (Datanode Uuid 4be95ec5-4913-4705-b42c-31dd65c3c4f4) service to localhost/127.0.0.1:33995 2024-11-14T06:46:04,950 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T06:46:04,951 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data1/current/BP-1678146812-172.17.0.2-1731566752608 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:46:04,951 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data2/current/BP-1678146812-172.17.0.2-1731566752608 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:46:04,951 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T06:46:04,954 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09., hostname=20680646cf8a,39105,1731566753272, seqNum=2] 2024-11-14T06:46:04,956 ERROR [FSHLog-0-hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9-prefix:20680646cf8a,39105,1731566753272 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:04,956 WARN [FSHLog-0-hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9-prefix:20680646cf8a,39105,1731566753272 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:04,957 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:04,957 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 20680646cf8a%2C39105%2C1731566753272:(num 1731566753665) roll requested 2024-11-14T06:46:04,957 INFO [regionserver/20680646cf8a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C39105%2C1731566753272.1731566764957 2024-11-14T06:46:04,964 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:04,964 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:04,964 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:04,964 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:04,964 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:04,965 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566753665 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566764957 2024-11-14T06:46:04,965 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:04,965 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:04,966 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45157:45157),(127.0.0.1/127.0.0.1:42603:42603)] 2024-11-14T06:46:04,966 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566753665 is not closed yet, will try archiving it next time 2024-11-14T06:46:04,966 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-14T06:46:04,966 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-14T06:46:04,967 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566753665 2024-11-14T06:46:04,969 WARN [IPC Server handler 2 on default port 33995 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566753665 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-14T06:46:04,972 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566753665 after 4ms 2024-11-14T06:46:05,350 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:06,305 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:06,966 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:06,968 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566764957 2024-11-14T06:46:06,970 WARN [ResponseProcessor for block BP-1678146812-172.17.0.2-1731566752608:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1678146812-172.17.0.2-1731566752608:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:06,971 WARN [DataStreamer for file /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566764957 block BP-1678146812-172.17.0.2-1731566752608:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK], DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]) is bad. 2024-11-14T06:46:06,972 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:38012 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:40987:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38012 dst: /127.0.0.1:40987 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:06,972 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:60780 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:45879:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60780 dst: /127.0.0.1:45879 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T06:46:06,974 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5b3e7853{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:46:06,975 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@65429201{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:46:06,975 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:46:06,975 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25509568{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:46:06,976 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c141b19{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/hadoop.log.dir/,STOPPED} 2024-11-14T06:46:06,978 WARN [BP-1678146812-172.17.0.2-1731566752608 heartbeating to localhost/127.0.0.1:33995 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T06:46:06,978 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T06:46:06,978 WARN [BP-1678146812-172.17.0.2-1731566752608 heartbeating to localhost/127.0.0.1:33995 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1678146812-172.17.0.2-1731566752608 (Datanode Uuid d7a86ca6-cdad-42e0-b252-0609960e1a8b) service to localhost/127.0.0.1:33995 2024-11-14T06:46:06,978 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T06:46:06,978 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data9/current/BP-1678146812-172.17.0.2-1731566752608 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:46:06,979 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data10/current/BP-1678146812-172.17.0.2-1731566752608 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:46:06,979 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T06:46:07,350 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:08,306 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:08,967 WARN [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]] 2024-11-14T06:46:08,967 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:08,968 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 20680646cf8a%2C39105%2C1731566753272:(num 1731566764957) roll requested 2024-11-14T06:46:08,968 INFO [regionserver/20680646cf8a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C39105%2C1731566753272.1731566768968 2024-11-14T06:46:08,974 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566753665 after 4007ms 2024-11-14T06:46:08,976 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43453 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:08,976 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54670 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741839_1021] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data6]'}, localName='127.0.0.1:35733', datanodeUuid='92e56133-38bc-451b-a9b6-c6dd482e646f', xmitsInProgress=0}:Exception transferring block BP-1678146812-172.17.0.2-1731566752608:blk_1073741839_1021 to mirror 127.0.0.1:43453 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:08,976 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK], DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]) is bad. 2024-11-14T06:46:08,976 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741839_1021 2024-11-14T06:46:08,976 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54670 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741839_1021] {}] datanode.BlockReceiver(316): Block 1073741839 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-14T06:46:08,977 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54670 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:35733:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54670 dst: /127.0.0.1:35733 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:08,979 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK] 2024-11-14T06:46:08,982 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:08,982 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK], DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]) is bad. 
2024-11-14T06:46:08,982 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741840_1022 2024-11-14T06:46:08,983 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK] 2024-11-14T06:46:08,983 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-14T06:46:08,987 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:08,987 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:08,987 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:08,988 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:08,988 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:08,988 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566764957 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566768968 2024-11-14T06:46:08,989 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37411:37411),(127.0.0.1/127.0.0.1:42603:42603)] 2024-11-14T06:46:08,989 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566753665 is not closed yet, will try archiving it next time 2024-11-14T06:46:08,989 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566764957 is not closed yet, will try archiving it next time 2024-11-14T06:46:08,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45879 is added to blk_1073741838_1020 (size=3600) 2024-11-14T06:46:09,351 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T06:46:09,392 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566753665 is not closed yet, will try archiving it next time 2024-11-14T06:46:10,307 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:10,988 WARN [ResponseProcessor for block BP-1678146812-172.17.0.2-1731566752608:blk_1073741841_1023 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1678146812-172.17.0.2-1731566752608:blk_1073741841_1023 java.io.IOException: Bad response ERROR for BP-1678146812-172.17.0.2-1731566752608:blk_1073741841_1023 from datanode DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:10,989 WARN [DataStreamer for file /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566768968 block BP-1678146812-172.17.0.2-1731566752608:blk_1073741841_1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK], DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]) is bad. 2024-11-14T06:46:10,989 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:10,989 WARN [PacketResponder: BP-1678146812-172.17.0.2-1731566752608:blk_1073741841_1023, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:45879] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] 
at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:10,990 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54680 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:35733:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54680 dst: /127.0.0.1:35733 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:10,990 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:50052 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:45879:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50052 dst: /127.0.0.1:45879 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] 
at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T06:46:10,991 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1d80e9a4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:46:10,991 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2c590722{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:46:10,991 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:46:10,991 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c5e4864{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:46:10,992 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ca4b7c9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/hadoop.log.dir/,STOPPED} 2024-11-14T06:46:10,993 WARN [BP-1678146812-172.17.0.2-1731566752608 heartbeating to localhost/127.0.0.1:33995 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T06:46:10,993 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T06:46:10,993 WARN [BP-1678146812-172.17.0.2-1731566752608 heartbeating to localhost/127.0.0.1:33995 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1678146812-172.17.0.2-1731566752608 (Datanode Uuid 973ca49c-160b-49d4-b8ca-7cf03e483e79) service to localhost/127.0.0.1:33995 2024-11-14T06:46:10,993 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T06:46:10,993 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data7/current/BP-1678146812-172.17.0.2-1731566752608 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:46:10,994 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data8/current/BP-1678146812-172.17.0.2-1731566752608 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:46:10,994 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T06:46:11,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39105 {}] regionserver.HRegion(8855): Flush requested on 4c50aa91d588b8b257a9bbc495433f09 2024-11-14T06:46:11,004 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4c50aa91d588b8b257a9bbc495433f09 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T06:46:11,024 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/3690e7cf64274d0681b32834c73475e5 is 1080, key is row0002/info:/1731566766980/Put/seqid=0 2024-11-14T06:46:11,025 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:11,026 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK], DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]) is bad. 2024-11-14T06:46:11,026 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741842_1025 2024-11-14T06:46:11,026 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK] 2024-11-14T06:46:11,027 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:11,027 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK], DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]) is bad. 
2024-11-14T06:46:11,027 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741843_1026 2024-11-14T06:46:11,028 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK] 2024-11-14T06:46:11,030 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36149 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:11,030 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54702 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741844_1027] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data6]'}, localName='127.0.0.1:35733', datanodeUuid='92e56133-38bc-451b-a9b6-c6dd482e646f', xmitsInProgress=0}:Exception transferring block BP-1678146812-172.17.0.2-1731566752608:blk_1073741844_1027 to mirror 127.0.0.1:36149 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:11,030 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK], DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]) is bad. 
2024-11-14T06:46:11,030 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741844_1027 2024-11-14T06:46:11,030 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54702 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741844_1027] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T06:46:11,030 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54702 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741844_1027] {}] datanode.DataXceiver(331): 127.0.0.1:35733:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54702 dst: /127.0.0.1:35733 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:11,031 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK] 2024-11-14T06:46:11,032 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:11,032 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK], DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]) is bad. 
2024-11-14T06:46:11,032 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741845_1028 2024-11-14T06:46:11,033 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK] 2024-11-14T06:46:11,034 WARN [IPC Server handler 3 on default port 33995 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T06:46:11,034 WARN [IPC Server handler 3 on default port 33995 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T06:46:11,034 WARN [IPC Server handler 3 on default port 33995 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T06:46:11,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741846_1029 (size=10347) 2024-11-14T06:46:11,351 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T06:46:11,439 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/3690e7cf64274d0681b32834c73475e5 2024-11-14T06:46:11,452 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/3690e7cf64274d0681b32834c73475e5 as hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/3690e7cf64274d0681b32834c73475e5 2024-11-14T06:46:11,458 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/3690e7cf64274d0681b32834c73475e5, entries=5, sequenceid=11, filesize=10.1 K 2024-11-14T06:46:11,459 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 4c50aa91d588b8b257a9bbc495433f09 in 456ms, sequenceid=11, compaction requested=false 2024-11-14T06:46:11,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4c50aa91d588b8b257a9bbc495433f09: 2024-11-14T06:46:11,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39105 {}] regionserver.HRegion(8855): Flush requested on 4c50aa91d588b8b257a9bbc495433f09 2024-11-14T06:46:11,632 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4c50aa91d588b8b257a9bbc495433f09 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-14T06:46:11,637 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/01c8672b52f042089e5629da9f864cce is 1080, key is row0007/info:/1731566771005/Put/seqid=0 2024-11-14T06:46:11,638 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T06:46:11,639 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK], DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]) is bad. 2024-11-14T06:46:11,639 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741847_1030 2024-11-14T06:46:11,640 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK] 2024-11-14T06:46:11,643 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43453 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:11,643 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK], DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]) is bad. 2024-11-14T06:46:11,643 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54726 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741848_1031] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data6]'}, localName='127.0.0.1:35733', datanodeUuid='92e56133-38bc-451b-a9b6-c6dd482e646f', xmitsInProgress=0}:Exception transferring block BP-1678146812-172.17.0.2-1731566752608:blk_1073741848_1031 to mirror 127.0.0.1:43453 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:11,643 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741848_1031 2024-11-14T06:46:11,644 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54726 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741848_1031] {}] datanode.BlockReceiver(316): Block 1073741848 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T06:46:11,644 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54726 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741848_1031] {}] datanode.DataXceiver(331): 127.0.0.1:35733:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54726 dst: /127.0.0.1:35733 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:11,644 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK] 2024-11-14T06:46:11,647 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40987 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T06:46:11,647 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54728 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data6]'}, localName='127.0.0.1:35733', datanodeUuid='92e56133-38bc-451b-a9b6-c6dd482e646f', xmitsInProgress=0}:Exception transferring block BP-1678146812-172.17.0.2-1731566752608:blk_1073741849_1032 to mirror 127.0.0.1:40987 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:11,647 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK], DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]) is bad. 2024-11-14T06:46:11,647 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741849_1032 2024-11-14T06:46:11,647 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54728 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T06:46:11,647 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54728 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:35733:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54728 dst: /127.0.0.1:35733 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:11,648 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK] 2024-11-14T06:46:11,649 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:11,649 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK], DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]) is bad. 
2024-11-14T06:46:11,649 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741850_1033 2024-11-14T06:46:11,650 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK] 2024-11-14T06:46:11,651 WARN [IPC Server handler 4 on default port 33995 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T06:46:11,651 WARN [IPC Server handler 4 on default port 33995 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T06:46:11,651 WARN [IPC Server handler 4 on default port 33995 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T06:46:11,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741851_1034 (size=12506) 2024-11-14T06:46:12,056 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/01c8672b52f042089e5629da9f864cce 2024-11-14T06:46:12,067 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/01c8672b52f042089e5629da9f864cce as hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/01c8672b52f042089e5629da9f864cce 2024-11-14T06:46:12,075 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/01c8672b52f042089e5629da9f864cce, entries=7, sequenceid=24, filesize=12.2 K 2024-11-14T06:46:12,077 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 4c50aa91d588b8b257a9bbc495433f09 in 444ms, sequenceid=24, compaction requested=false 2024-11-14T06:46:12,077 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
4c50aa91d588b8b257a9bbc495433f09: 2024-11-14T06:46:12,077 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-14T06:46:12,077 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:46:12,077 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/01c8672b52f042089e5629da9f864cce because midkey is the same as first or last row 2024-11-14T06:46:12,307 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:12,990 WARN [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK]] 2024-11-14T06:46:12,990 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:12,990 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 20680646cf8a%2C39105%2C1731566753272:(num 1731566768968) roll requested 2024-11-14T06:46:12,991 INFO [regionserver/20680646cf8a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C39105%2C1731566753272.1731566772990 2024-11-14T06:46:12,997 WARN [Thread-932 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43453 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:12,997 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54744 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741852_1035] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data6]'}, localName='127.0.0.1:35733', datanodeUuid='92e56133-38bc-451b-a9b6-c6dd482e646f', xmitsInProgress=0}:Exception transferring block BP-1678146812-172.17.0.2-1731566752608:blk_1073741852_1035 to mirror 127.0.0.1:43453 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:12,998 WARN [Thread-932 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK], DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]) is bad. 2024-11-14T06:46:12,998 WARN [Thread-932 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741852_1035 2024-11-14T06:46:12,998 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54744 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741852_1035] {}] datanode.BlockReceiver(316): Block 1073741852 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-14T06:46:12,998 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54744 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741852_1035] {}] datanode.DataXceiver(331): 127.0.0.1:35733:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54744 dst: /127.0.0.1:35733 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:12,999 WARN [Thread-932 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK] 2024-11-14T06:46:13,001 WARN [Thread-932 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:13,001 WARN [Thread-932 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK], DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]) is bad. 2024-11-14T06:46:13,001 WARN [Thread-932 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741853_1036 2024-11-14T06:46:13,002 WARN [Thread-932 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK] 2024-11-14T06:46:13,003 WARN [Thread-932 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:13,003 WARN [Thread-932 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK], DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]) is bad. 2024-11-14T06:46:13,003 WARN [Thread-932 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741854_1037 2024-11-14T06:46:13,004 WARN [Thread-932 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK] 2024-11-14T06:46:13,005 WARN [Thread-932 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40987 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:13,005 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54750 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741855_1038] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data6]'}, localName='127.0.0.1:35733', datanodeUuid='92e56133-38bc-451b-a9b6-c6dd482e646f', xmitsInProgress=0}:Exception transferring block BP-1678146812-172.17.0.2-1731566752608:blk_1073741855_1038 to mirror 127.0.0.1:40987 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:13,006 WARN [Thread-932 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK], DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]) is bad. 2024-11-14T06:46:13,006 WARN [Thread-932 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741855_1038 2024-11-14T06:46:13,006 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54750 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741855_1038] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-14T06:46:13,006 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54750 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741855_1038] {}] datanode.DataXceiver(331): 127.0.0.1:35733:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54750 dst: /127.0.0.1:35733 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T06:46:13,006 WARN [Thread-932 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK] 2024-11-14T06:46:13,007 WARN [IPC Server handler 1 on default port 33995 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T06:46:13,007 WARN [IPC Server handler 1 on default port 33995 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T06:46:13,007 WARN [IPC Server handler 1 on default port 33995 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T06:46:13,009 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:13,010 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:13,010 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:13,010 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:13,010 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:13,010 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566768968 with entries=24, filesize=24.23 KB; new WAL /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566772990 2024-11-14T06:46:13,011 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37411:37411)] 2024-11-14T06:46:13,011 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566753665 is not closed yet, will try archiving it next time 2024-11-14T06:46:13,011 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566768968 is not closed yet, will try archiving it next time 2024-11-14T06:46:13,012 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566764957 to 
hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/oldWALs/20680646cf8a%2C39105%2C1731566753272.1731566764957 2024-11-14T06:46:13,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741841_1024 (size=24823) 2024-11-14T06:46:13,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39105 {}] regionserver.HRegion(8855): Flush requested on 4c50aa91d588b8b257a9bbc495433f09 2024-11-14T06:46:13,064 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4c50aa91d588b8b257a9bbc495433f09 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-14T06:46:13,071 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/d5e8b7c7450d45e28ec3e10333b5e0bd is 1079, key is tmprow/info:/1731566773062/Put/seqid=0 2024-11-14T06:46:13,073 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:13,073 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK], DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]) is bad. 2024-11-14T06:46:13,074 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741857_1040 2024-11-14T06:46:13,074 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK] 2024-11-14T06:46:13,076 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43453 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:13,076 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54766 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741858_1041] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data6]'}, localName='127.0.0.1:35733', datanodeUuid='92e56133-38bc-451b-a9b6-c6dd482e646f', xmitsInProgress=0}:Exception transferring block BP-1678146812-172.17.0.2-1731566752608:blk_1073741858_1041 to mirror 127.0.0.1:43453 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:13,076 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK], DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]) is bad. 2024-11-14T06:46:13,076 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741858_1041 2024-11-14T06:46:13,076 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54766 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741858_1041] {}] datanode.BlockReceiver(316): Block 1073741858 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T06:46:13,076 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54766 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741858_1041] {}] datanode.DataXceiver(331): 127.0.0.1:35733:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54766 dst: /127.0.0.1:35733 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:13,077 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK] 2024-11-14T06:46:13,079 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45879 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:13,078 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54776 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data6]'}, localName='127.0.0.1:35733', datanodeUuid='92e56133-38bc-451b-a9b6-c6dd482e646f', xmitsInProgress=0}:Exception transferring block BP-1678146812-172.17.0.2-1731566752608:blk_1073741859_1042 to mirror 127.0.0.1:45879 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:13,079 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK], DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]) is bad. 2024-11-14T06:46:13,079 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741859_1042 2024-11-14T06:46:13,079 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54776 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T06:46:13,079 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54776 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:35733:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54776 dst: /127.0.0.1:35733 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:13,079 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK] 2024-11-14T06:46:13,081 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40987 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:13,081 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54778 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741860_1043] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data6]'}, localName='127.0.0.1:35733', datanodeUuid='92e56133-38bc-451b-a9b6-c6dd482e646f', xmitsInProgress=0}:Exception transferring block BP-1678146812-172.17.0.2-1731566752608:blk_1073741860_1043 to mirror 127.0.0.1:40987 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:13,082 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK], DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]) is bad. 2024-11-14T06:46:13,082 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54778 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741860_1043] {}] datanode.BlockReceiver(316): Block 1073741860 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T06:46:13,082 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741860_1043 2024-11-14T06:46:13,082 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54778 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741860_1043] {}] datanode.DataXceiver(331): 127.0.0.1:35733:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54778 dst: /127.0.0.1:35733 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:13,082 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK] 2024-11-14T06:46:13,083 WARN [IPC Server handler 3 on default port 33995 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T06:46:13,083 WARN [IPC Server handler 3 on default port 33995 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T06:46:13,083 WARN [IPC Server handler 3 on default port 33995 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T06:46:13,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741861_1044 (size=6027) 2024-11-14T06:46:13,352 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T06:46:13,414 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566753665 is not closed yet, will try archiving it next time 2024-11-14T06:46:13,488 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/d5e8b7c7450d45e28ec3e10333b5e0bd 2024-11-14T06:46:13,499 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/d5e8b7c7450d45e28ec3e10333b5e0bd as hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/d5e8b7c7450d45e28ec3e10333b5e0bd 2024-11-14T06:46:13,505 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/d5e8b7c7450d45e28ec3e10333b5e0bd, entries=1, sequenceid=34, filesize=5.9 K 2024-11-14T06:46:13,507 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 4c50aa91d588b8b257a9bbc495433f09 in 444ms, sequenceid=34, compaction requested=true 2024-11-14T06:46:13,507 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4c50aa91d588b8b257a9bbc495433f09: 2024-11-14T06:46:13,507 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-14T06:46:13,507 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:46:13,507 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/01c8672b52f042089e5629da9f864cce because midkey is the same as first or last row 2024-11-14T06:46:13,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4c50aa91d588b8b257a9bbc495433f09:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T06:46:13,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:46:13,507 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T06:46:13,509 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T06:46:13,509 DEBUG 
[RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.HStore(1541): 4c50aa91d588b8b257a9bbc495433f09/info is initiating minor compaction (all files) 2024-11-14T06:46:13,509 INFO [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4c50aa91d588b8b257a9bbc495433f09/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09. 2024-11-14T06:46:13,509 INFO [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/3690e7cf64274d0681b32834c73475e5, hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/01c8672b52f042089e5629da9f864cce, hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/d5e8b7c7450d45e28ec3e10333b5e0bd] into tmpdir=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp, totalSize=28.2 K 2024-11-14T06:46:13,509 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3690e7cf64274d0681b32834c73475e5, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731566766980 2024-11-14T06:46:13,510 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] compactions.Compactor(225): Compacting 01c8672b52f042089e5629da9f864cce, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731566771005 2024-11-14T06:46:13,510 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] compactions.Compactor(225): Compacting d5e8b7c7450d45e28ec3e10333b5e0bd, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731566773062 2024-11-14T06:46:13,523 INFO [RS:0;20680646cf8a:39105-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4c50aa91d588b8b257a9bbc495433f09#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T06:46:13,524 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/f974ae895eae4e1f9500941b8b0872b8 is 1080, key is row0002/info:/1731566766980/Put/seqid=0 2024-11-14T06:46:13,525 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:13,526 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK], DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]) is bad. 2024-11-14T06:46:13,526 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741862_1045 2024-11-14T06:46:13,526 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK] 2024-11-14T06:46:13,528 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:13,528 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK], DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]) is bad. 2024-11-14T06:46:13,528 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741863_1046 2024-11-14T06:46:13,528 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK] 2024-11-14T06:46:13,531 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45879 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:13,531 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54820 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741864_1047] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data6]'}, localName='127.0.0.1:35733', datanodeUuid='92e56133-38bc-451b-a9b6-c6dd482e646f', xmitsInProgress=0}:Exception transferring block BP-1678146812-172.17.0.2-1731566752608:blk_1073741864_1047 to mirror 127.0.0.1:45879 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:13,531 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK], DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]) is bad. 2024-11-14T06:46:13,531 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741864_1047 2024-11-14T06:46:13,531 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54820 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741864_1047] {}] datanode.BlockReceiver(316): Block 1073741864 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T06:46:13,531 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54820 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741864_1047] {}] datanode.DataXceiver(331): 127.0.0.1:35733:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54820 dst: /127.0.0.1:35733 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:13,531 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK] 2024-11-14T06:46:13,533 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:13,533 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK], DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]) is bad. 
2024-11-14T06:46:13,533 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741865_1048 2024-11-14T06:46:13,534 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK] 2024-11-14T06:46:13,534 WARN [IPC Server handler 0 on default port 33995 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T06:46:13,534 WARN [IPC Server handler 0 on default port 33995 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T06:46:13,535 WARN [IPC Server handler 0 on default port 33995 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T06:46:13,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741866_1049 (size=17994) 2024-11-14T06:46:13,953 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/f974ae895eae4e1f9500941b8b0872b8 as hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/f974ae895eae4e1f9500941b8b0872b8 2024-11-14T06:46:13,959 INFO [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4c50aa91d588b8b257a9bbc495433f09/info of 4c50aa91d588b8b257a9bbc495433f09 into f974ae895eae4e1f9500941b8b0872b8(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-14T06:46:13,959 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4c50aa91d588b8b257a9bbc495433f09: 2024-11-14T06:46:13,960 INFO [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09., storeName=4c50aa91d588b8b257a9bbc495433f09/info, priority=13, startTime=1731566773507; duration=0sec 2024-11-14T06:46:13,960 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-14T06:46:13,960 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:46:13,960 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/f974ae895eae4e1f9500941b8b0872b8 because midkey is the same as first or last row 2024-11-14T06:46:13,960 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-14T06:46:13,960 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:46:13,960 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/f974ae895eae4e1f9500941b8b0872b8 because midkey is the same as first or last row 2024-11-14T06:46:13,960 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-14T06:46:13,960 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:46:13,960 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/f974ae895eae4e1f9500941b8b0872b8 because midkey is the same as first or last row 2024-11-14T06:46:13,960 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:46:13,961 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4c50aa91d588b8b257a9bbc495433f09:info 2024-11-14T06:46:14,308 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:14,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39105 {}] regionserver.HRegion(8855): Flush requested on 4c50aa91d588b8b257a9bbc495433f09 2024-11-14T06:46:14,487 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4c50aa91d588b8b257a9bbc495433f09 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-14T06:46:14,493 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/1ad395ed7ea9474ead9e9cd7e6584e72 is 1079, key is tmprow/info:/1731566774486/Put/seqid=0 2024-11-14T06:46:14,495 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:14,495 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK], DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]) is bad. 2024-11-14T06:46:14,495 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741867_1050 2024-11-14T06:46:14,496 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK] 2024-11-14T06:46:14,497 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:14,498 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK], DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]) is bad. 2024-11-14T06:46:14,498 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741868_1051 2024-11-14T06:46:14,498 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK] 2024-11-14T06:46:14,501 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36149 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:14,501 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54840 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741869_1052] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data6]'}, localName='127.0.0.1:35733', datanodeUuid='92e56133-38bc-451b-a9b6-c6dd482e646f', xmitsInProgress=0}:Exception transferring block BP-1678146812-172.17.0.2-1731566752608:blk_1073741869_1052 to mirror 127.0.0.1:36149 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:14,502 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK], DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]) is bad. 2024-11-14T06:46:14,502 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741869_1052 2024-11-14T06:46:14,502 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54840 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741869_1052] {}] datanode.BlockReceiver(316): Block 1073741869 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T06:46:14,502 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54840 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741869_1052] {}] datanode.DataXceiver(331): 127.0.0.1:35733:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54840 dst: /127.0.0.1:35733 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:14,503 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK] 2024-11-14T06:46:14,505 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:14,505 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK], DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]) is bad. 2024-11-14T06:46:14,505 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741870_1053 2024-11-14T06:46:14,506 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK] 2024-11-14T06:46:14,507 WARN [IPC Server handler 1 on default port 33995 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T06:46:14,507 WARN [IPC Server handler 1 on default port 33995 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T06:46:14,507 WARN [IPC Server handler 1 on default port 33995 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T06:46:14,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741871_1054 (size=6027) 2024-11-14T06:46:14,627 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@592222f4[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35733, datanodeUuid=92e56133-38bc-451b-a9b6-c6dd482e646f, infoPort=37411, infoSecurePort=0, ipcPort=43239, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608):Failed to transfer BP-1678146812-172.17.0.2-1731566752608:blk_1073741851_1034 to 127.0.0.1:36149 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:14,627 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@aef2d4f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35733, datanodeUuid=92e56133-38bc-451b-a9b6-c6dd482e646f, infoPort=37411, infoSecurePort=0, ipcPort=43239, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608):Failed to transfer BP-1678146812-172.17.0.2-1731566752608:blk_1073741846_1029 to 127.0.0.1:40987 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T06:46:14,910 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/1ad395ed7ea9474ead9e9cd7e6584e72 2024-11-14T06:46:14,921 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/1ad395ed7ea9474ead9e9cd7e6584e72 as hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/1ad395ed7ea9474ead9e9cd7e6584e72 2024-11-14T06:46:14,928 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/1ad395ed7ea9474ead9e9cd7e6584e72, entries=1, sequenceid=45, filesize=5.9 K 2024-11-14T06:46:14,929 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 4c50aa91d588b8b257a9bbc495433f09 in 442ms, sequenceid=45, compaction requested=false 2024-11-14T06:46:14,930 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4c50aa91d588b8b257a9bbc495433f09: 2024-11-14T06:46:14,930 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-14T06:46:14,930 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:46:14,930 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/f974ae895eae4e1f9500941b8b0872b8 because midkey is the same as first or last row 2024-11-14T06:46:15,012 WARN [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK]] 2024-11-14T06:46:15,012 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T06:46:15,012 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 20680646cf8a%2C39105%2C1731566753272:(num 1731566772990) roll requested 2024-11-14T06:46:15,013 INFO [regionserver/20680646cf8a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C39105%2C1731566753272.1731566775012 2024-11-14T06:46:15,017 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:15,017 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK], DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]) is bad. 2024-11-14T06:46:15,017 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741872_1055 2024-11-14T06:46:15,019 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK] 2024-11-14T06:46:15,021 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T06:46:15,022 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK], DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]) is bad. 2024-11-14T06:46:15,022 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741873_1056 2024-11-14T06:46:15,022 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK] 2024-11-14T06:46:15,025 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:15,025 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK], DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]) is bad. 2024-11-14T06:46:15,025 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741874_1057 2024-11-14T06:46:15,026 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK] 2024-11-14T06:46:15,029 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40987 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T06:46:15,029 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54850 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741875_1058] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data6]'}, localName='127.0.0.1:35733', datanodeUuid='92e56133-38bc-451b-a9b6-c6dd482e646f', xmitsInProgress=0}:Exception transferring block BP-1678146812-172.17.0.2-1731566752608:blk_1073741875_1058 to mirror 127.0.0.1:40987 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:15,029 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK], DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]) is bad. 2024-11-14T06:46:15,029 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741875_1058 2024-11-14T06:46:15,029 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54850 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741875_1058] {}] datanode.BlockReceiver(316): Block 1073741875 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-14T06:46:15,029 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54850 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741875_1058] {}] datanode.DataXceiver(331): 127.0.0.1:35733:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54850 dst: /127.0.0.1:35733 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:15,030 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK] 2024-11-14T06:46:15,031 WARN [IPC Server handler 1 on default port 33995 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T06:46:15,031 WARN [IPC Server handler 1 on default port 33995 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T06:46:15,031 WARN [IPC Server handler 1 on default port 33995 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T06:46:15,033 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:15,033 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:15,033 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:15,034 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:15,034 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:15,034 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566772990 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566775012 2024-11-14T06:46:15,034 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37411:37411)] 2024-11-14T06:46:15,035 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566753665 is not closed yet, will try archiving it next time 
2024-11-14T06:46:15,035 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566772990 is not closed yet, will try archiving it next time 2024-11-14T06:46:15,035 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566768968 to hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/oldWALs/20680646cf8a%2C39105%2C1731566753272.1731566768968 2024-11-14T06:46:15,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741856_1039 (size=13591) 2024-11-14T06:46:15,353 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:15,437 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566753665 is not closed yet, will try archiving it next time 2024-11-14T06:46:15,619 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@aef2d4f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35733, datanodeUuid=92e56133-38bc-451b-a9b6-c6dd482e646f, infoPort=37411, infoSecurePort=0, ipcPort=43239, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608):Failed to transfer BP-1678146812-172.17.0.2-1731566752608:blk_1073741841_1024 to 127.0.0.1:45879 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T06:46:15,619 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@592222f4[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35733, datanodeUuid=92e56133-38bc-451b-a9b6-c6dd482e646f, infoPort=37411, infoSecurePort=0, ipcPort=43239, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608):Failed to transfer BP-1678146812-172.17.0.2-1731566752608:blk_1073741861_1044 to 127.0.0.1:45879 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:15,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39105 {}] regionserver.HRegion(8855): Flush requested on 4c50aa91d588b8b257a9bbc495433f09 2024-11-14T06:46:15,913 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4c50aa91d588b8b257a9bbc495433f09 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-14T06:46:15,923 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/80fc92c800d24bb0946e0ceea452e155 is 1079, key is tmprow/info:/1731566775910/Put/seqid=0 2024-11-14T06:46:15,925 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:15,925 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK], DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]) is bad. 
2024-11-14T06:46:15,925 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741877_1060 2024-11-14T06:46:15,926 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK] 2024-11-14T06:46:15,927 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:15,927 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK], DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]) is bad. 2024-11-14T06:46:15,927 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741878_1061 2024-11-14T06:46:15,927 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK] 2024-11-14T06:46:15,928 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T06:46:15,928 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK], DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]) is bad. 2024-11-14T06:46:15,928 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741879_1062 2024-11-14T06:46:15,929 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK] 2024-11-14T06:46:15,931 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40987 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:15,931 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54872 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741880_1063] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data6]'}, localName='127.0.0.1:35733', datanodeUuid='92e56133-38bc-451b-a9b6-c6dd482e646f', xmitsInProgress=0}:Exception transferring block BP-1678146812-172.17.0.2-1731566752608:blk_1073741880_1063 to mirror 127.0.0.1:40987 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T06:46:15,931 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK], DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]) is bad. 2024-11-14T06:46:15,931 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741880_1063 2024-11-14T06:46:15,931 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54872 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741880_1063] {}] datanode.BlockReceiver(316): Block 1073741880 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T06:46:15,931 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54872 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741880_1063] {}] datanode.DataXceiver(331): 127.0.0.1:35733:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54872 dst: /127.0.0.1:35733 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T06:46:15,931 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK] 2024-11-14T06:46:15,932 WARN [IPC Server handler 1 on default port 33995 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T06:46:15,932 WARN [IPC Server handler 1 on default port 33995 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T06:46:15,932 WARN [IPC Server handler 1 on default port 33995 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T06:46:15,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741881_1064 (size=6027) 2024-11-14T06:46:16,309 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
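The BlockPlacementPolicyDefault warning above names its own remedy: enable DEBUG on the placement policy and the network topology to see why 0 of the required storages could be selected. In a single-JVM mini cluster that can be done programmatically with Log4j's Configurator, as sketched below; the test harness could equally add the two loggers to its log4j2 properties.

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    public final class PlacementDebug {
      public static void enable() {
        // Raise exactly the two loggers named in the WARN message to DEBUG.
        Configurator.setLevel("org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
        Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
      }
    }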
2024-11-14T06:46:16,336 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/80fc92c800d24bb0946e0ceea452e155 2024-11-14T06:46:16,343 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/80fc92c800d24bb0946e0ceea452e155 as hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/80fc92c800d24bb0946e0ceea452e155 2024-11-14T06:46:16,349 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/80fc92c800d24bb0946e0ceea452e155, entries=1, sequenceid=55, filesize=5.9 K 2024-11-14T06:46:16,350 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 4c50aa91d588b8b257a9bbc495433f09 in 437ms, sequenceid=55, compaction requested=true 2024-11-14T06:46:16,350 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4c50aa91d588b8b257a9bbc495433f09: 2024-11-14T06:46:16,350 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-14T06:46:16,350 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:46:16,350 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/f974ae895eae4e1f9500941b8b0872b8 because midkey is the same as first or last row 2024-11-14T06:46:16,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4c50aa91d588b8b257a9bbc495433f09:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T06:46:16,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:46:16,350 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T06:46:16,352 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T06:46:16,352 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.HStore(1541): 4c50aa91d588b8b257a9bbc495433f09/info is initiating minor compaction (all files) 2024-11-14T06:46:16,352 INFO [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
4c50aa91d588b8b257a9bbc495433f09/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09. 2024-11-14T06:46:16,352 INFO [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/f974ae895eae4e1f9500941b8b0872b8, hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/1ad395ed7ea9474ead9e9cd7e6584e72, hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/80fc92c800d24bb0946e0ceea452e155] into tmpdir=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp, totalSize=29.3 K 2024-11-14T06:46:16,353 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] compactions.Compactor(225): Compacting f974ae895eae4e1f9500941b8b0872b8, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731566766980 2024-11-14T06:46:16,353 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1ad395ed7ea9474ead9e9cd7e6584e72, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731566774486 2024-11-14T06:46:16,353 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] compactions.Compactor(225): Compacting 80fc92c800d24bb0946e0ceea452e155, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731566775910 2024-11-14T06:46:16,368 INFO [RS:0;20680646cf8a:39105-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4c50aa91d588b8b257a9bbc495433f09#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T06:46:16,369 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/90938c37c6f743c8aab9082f83a300d6 is 1080, key is row0002/info:/1731566766980/Put/seqid=0 2024-11-14T06:46:16,371 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:16,371 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK], DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]) is bad. 2024-11-14T06:46:16,371 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741882_1065 2024-11-14T06:46:16,371 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43453,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK] 2024-11-14T06:46:16,374 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45879 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:16,374 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54886 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741883_1066] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data6]'}, localName='127.0.0.1:35733', datanodeUuid='92e56133-38bc-451b-a9b6-c6dd482e646f', xmitsInProgress=0}:Exception transferring block BP-1678146812-172.17.0.2-1731566752608:blk_1073741883_1066 to mirror 127.0.0.1:45879 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:16,374 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK], DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]) is bad. 2024-11-14T06:46:16,374 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54886 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741883_1066] {}] datanode.BlockReceiver(316): Block 1073741883 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T06:46:16,374 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741883_1066 2024-11-14T06:46:16,374 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54886 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741883_1066] {}] datanode.DataXceiver(331): 127.0.0.1:35733:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54886 dst: /127.0.0.1:35733 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:16,375 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK] 2024-11-14T06:46:16,377 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36149 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T06:46:16,377 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54896 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741884_1067] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data6]'}, localName='127.0.0.1:35733', datanodeUuid='92e56133-38bc-451b-a9b6-c6dd482e646f', xmitsInProgress=0}:Exception transferring block BP-1678146812-172.17.0.2-1731566752608:blk_1073741884_1067 to mirror 127.0.0.1:36149 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:16,378 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK], DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]) is bad. 2024-11-14T06:46:16,378 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741884_1067 2024-11-14T06:46:16,378 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54896 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741884_1067] {}] datanode.BlockReceiver(316): Block 1073741884 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T06:46:16,378 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:54896 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741884_1067] {}] datanode.DataXceiver(331): 127.0.0.1:35733:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54896 dst: /127.0.0.1:35733 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:16,378 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK] 2024-11-14T06:46:16,380 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:16,380 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK], DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]) is bad. 
2024-11-14T06:46:16,380 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741885_1068 2024-11-14T06:46:16,381 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK] 2024-11-14T06:46:16,382 WARN [IPC Server handler 3 on default port 33995 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T06:46:16,382 WARN [IPC Server handler 3 on default port 33995 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T06:46:16,382 WARN [IPC Server handler 3 on default port 33995 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T06:46:16,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741886_1069 (size=18097) 2024-11-14T06:46:16,792 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/90938c37c6f743c8aab9082f83a300d6 as hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/90938c37c6f743c8aab9082f83a300d6 2024-11-14T06:46:16,799 INFO [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4c50aa91d588b8b257a9bbc495433f09/info of 4c50aa91d588b8b257a9bbc495433f09 into 90938c37c6f743c8aab9082f83a300d6(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-14T06:46:16,799 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4c50aa91d588b8b257a9bbc495433f09: 2024-11-14T06:46:16,799 INFO [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09., storeName=4c50aa91d588b8b257a9bbc495433f09/info, priority=13, startTime=1731566776350; duration=0sec 2024-11-14T06:46:16,799 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-14T06:46:16,799 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:46:16,799 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/90938c37c6f743c8aab9082f83a300d6 because midkey is the same as first or last row 2024-11-14T06:46:16,799 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-14T06:46:16,799 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:46:16,799 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/90938c37c6f743c8aab9082f83a300d6 because midkey is the same as first or last row 2024-11-14T06:46:16,799 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-14T06:46:16,800 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:46:16,800 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/90938c37c6f743c8aab9082f83a300d6 because midkey is the same as first or last row 2024-11-14T06:46:16,800 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:46:16,800 DEBUG [RS:0;20680646cf8a:39105-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4c50aa91d588b8b257a9bbc495433f09:info 2024-11-14T06:46:17,035 WARN [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 
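The "Too many consecutive RollWriter requests" warning fires when FSHLog keeps asking for a new WAL because the current pipeline has fewer live replicas than it is willing to tolerate. A sketch of the two knobs involved follows; the property names are recalled from FSHLog and should be treated as assumptions that may differ across HBase versions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class WalRollTuning {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Replica count the WAL will tolerate before requesting a roll (assumed key name).
        conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 1);
        // Consecutive roll requests allowed before the warning above is logged (assumed key name).
        conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
        return conf;
      }
    }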
2024-11-14T06:46:17,035 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:17,144 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:46:17,149 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:46:17,149 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:46:17,149 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:46:17,150 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T06:46:17,150 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2852206a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:46:17,150 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c1d8e25{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:46:17,244 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@47bbe019{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/java.io.tmpdir/jetty-localhost-41957-hadoop-hdfs-3_4_1-tests_jar-_-any-16804166613259382183/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:46:17,245 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6f8ca33c{HTTP/1.1, (http/1.1)}{localhost:41957} 2024-11-14T06:46:17,245 INFO [Time-limited test {}] server.Server(415): Started @128841ms 2024-11-14T06:46:17,246 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T06:46:17,316 WARN [Thread-990 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T06:46:17,324 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x481e76c020499db8 with lease ID 0xba885f94c88789a7: from storage DS-230efee1-675c-4a81-b43c-66956f7e849f node DatanodeRegistration(127.0.0.1:46121, datanodeUuid=ff1689b7-6834-4e0f-9cff-2acd1fab3999, infoPort=37355, infoSecurePort=0, ipcPort=39103, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:46:17,324 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x481e76c020499db8 with lease ID 0xba885f94c88789a7: from storage DS-4f4b6a52-61a1-4195-8aa0-896965efc107 node DatanodeRegistration(127.0.0.1:46121, datanodeUuid=ff1689b7-6834-4e0f-9cff-2acd1fab3999, infoPort=37355, infoSecurePort=0, ipcPort=39103, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:46:17,353 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:17,618 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@592222f4[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35733, datanodeUuid=92e56133-38bc-451b-a9b6-c6dd482e646f, infoPort=37411, infoSecurePort=0, ipcPort=43239, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608):Failed to transfer BP-1678146812-172.17.0.2-1731566752608:blk_1073741866_1049 to 127.0.0.1:45879 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T06:46:17,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46121 is added to blk_1073741871_1054 (size=6027) 2024-11-14T06:46:18,309 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:18,619 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@592222f4[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35733, datanodeUuid=92e56133-38bc-451b-a9b6-c6dd482e646f, infoPort=37411, infoSecurePort=0, ipcPort=43239, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608):Failed to transfer BP-1678146812-172.17.0.2-1731566752608:blk_1073741881_1064 to 127.0.0.1:40987 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:18,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46121 is added to blk_1073741856_1039 (size=13591) 2024-11-14T06:46:19,035 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:19,354 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:20,309 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:20,623 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@aef2d4f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35733, datanodeUuid=92e56133-38bc-451b-a9b6-c6dd482e646f, infoPort=37411, infoSecurePort=0, ipcPort=43239, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608):Failed to transfer BP-1678146812-172.17.0.2-1731566752608:blk_1073741886_1069 to 127.0.0.1:45879 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:21,036 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:21,354 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:22,310 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:23,037 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:23,201 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T06:46:23,355 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:23,460 ERROR [FSHLog-0-hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData-prefix:20680646cf8a,38495,1731566753222 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:23,460 WARN [FSHLog-0-hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData-prefix:20680646cf8a,38495,1731566753222 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:23,461 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 20680646cf8a%2C38495%2C1731566753222:(num 1731566753373) roll requested 2024-11-14T06:46:23,462 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C38495%2C1731566753222.1731566783461 2024-11-14T06:46:23,469 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:23,470 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:23,470 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:23,470 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:23,470 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:23,471 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/WALs/20680646cf8a,38495,1731566753222/20680646cf8a%2C38495%2C1731566753222.1731566753373 with entries=54, filesize=26.68 KB; new WAL /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/WALs/20680646cf8a,38495,1731566753222/20680646cf8a%2C38495%2C1731566753222.1731566783461 2024-11-14T06:46:23,471 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:23,471 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:23,471 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/WALs/20680646cf8a,38495,1731566753222/20680646cf8a%2C38495%2C1731566753222.1731566753373 2024-11-14T06:46:23,471 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37355:37355),(127.0.0.1/127.0.0.1:37411:37411)] 2024-11-14T06:46:23,472 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/WALs/20680646cf8a,38495,1731566753222/20680646cf8a%2C38495%2C1731566753222.1731566753373 is not closed yet, will try archiving it next time 2024-11-14T06:46:23,472 WARN [IPC Server handler 4 on default port 33995 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/WALs/20680646cf8a,38495,1731566753222/20680646cf8a%2C38495%2C1731566753222.1731566753373 has not been closed. Lease recovery is in progress. RecoveryId = 1071 for block blk_1073741830_1006 2024-11-14T06:46:23,472 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/WALs/20680646cf8a,38495,1731566753222/20680646cf8a%2C38495%2C1731566753222.1731566753373 after 1ms 2024-11-14T06:46:24,311 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:25,037 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:26,312 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:27,038 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:27,338 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@1d25793c {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1678146812-172.17.0.2-1731566752608:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:36149,null,null]) java.net.ConnectException: Call From 20680646cf8a/172.17.0.2 to localhost:34791 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] 
at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-11-14T06:46:27,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46121 is added to blk_1073741833_1019 (size=455) 2024-11-14T06:46:27,475 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/WALs/20680646cf8a,38495,1731566753222/20680646cf8a%2C38495%2C1731566753222.1731566753373 after 4004ms 2024-11-14T06:46:28,007 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566753665 to hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/oldWALs/20680646cf8a%2C39105%2C1731566753272.1731566753665 2024-11-14T06:46:28,011 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566772990 to hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/oldWALs/20680646cf8a%2C39105%2C1731566753272.1731566772990 2024-11-14T06:46:28,313 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:29,039 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:29,324 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2cf2a672[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46121, datanodeUuid=ff1689b7-6834-4e0f-9cff-2acd1fab3999, infoPort=37355, infoSecurePort=0, ipcPort=39103, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608):Failed to transfer BP-1678146812-172.17.0.2-1731566752608:blk_1073741833_1019 to 127.0.0.1:40987 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:30,313 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:30,760 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C39105%2C1731566753272.1731566790759 2024-11-14T06:46:30,768 WARN [Thread-1022 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1072 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:30,768 WARN [Thread-1022 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741888_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK], DatanodeInfoWithStorage[127.0.0.1:46121,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]) is bad. 
2024-11-14T06:46:30,768 WARN [Thread-1022 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741888_1072 2024-11-14T06:46:30,769 WARN [Thread-1022 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK] 2024-11-14T06:46:30,770 WARN [Thread-1022 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1073 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:30,770 WARN [Thread-1022 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741889_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK], DatanodeInfoWithStorage[127.0.0.1:46121,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]) is bad. 2024-11-14T06:46:30,770 WARN [Thread-1022 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741889_1073 2024-11-14T06:46:30,771 WARN [Thread-1022 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK] 2024-11-14T06:46:30,772 WARN [Thread-1022 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1074 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T06:46:30,772 WARN [Thread-1022 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741890_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK], DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]) is bad. 2024-11-14T06:46:30,772 WARN [Thread-1022 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741890_1074 2024-11-14T06:46:30,772 WARN [Thread-1022 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK] 2024-11-14T06:46:30,776 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:30,777 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:30,777 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:30,777 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:30,777 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:30,777 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566775012 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566790759 2024-11-14T06:46:30,778 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37411:37411),(127.0.0.1/127.0.0.1:37355:37355)] 2024-11-14T06:46:30,778 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566775012 is not closed yet, will try archiving it next time 2024-11-14T06:46:30,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741876_1059 (size=12911) 2024-11-14T06:46:30,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39105 {}] regionserver.HRegion(8855): Flush requested on 4c50aa91d588b8b257a9bbc495433f09 2024-11-14T06:46:30,782 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4c50aa91d588b8b257a9bbc495433f09 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-14T06:46:30,786 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/314959b91b6f4e72a5f0ad5b96498c6e is 1080, key is row0013/info:/1731566790779/Put/seqid=0 2024-11-14T06:46:30,788 WARN [Thread-1028 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1076 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:30,788 WARN [Thread-1028 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741892_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK], DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]) is bad. 2024-11-14T06:46:30,788 WARN [Thread-1028 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741892_1076 2024-11-14T06:46:30,789 WARN [Thread-1028 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK] 2024-11-14T06:46:30,790 WARN [Thread-1028 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1077 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:30,791 WARN [Thread-1028 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741893_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK], DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]) is bad. 2024-11-14T06:46:30,791 WARN [Thread-1028 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741893_1077 2024-11-14T06:46:30,791 WARN [Thread-1028 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK] 2024-11-14T06:46:30,793 WARN [Thread-1028 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1078 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40987 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:30,793 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:34716 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741894_1078] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data4]'}, localName='127.0.0.1:46121', datanodeUuid='ff1689b7-6834-4e0f-9cff-2acd1fab3999', xmitsInProgress=0}:Exception transferring block BP-1678146812-172.17.0.2-1731566752608:blk_1073741894_1078 to mirror 127.0.0.1:40987 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:30,794 WARN [Thread-1028 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741894_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46121,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK], DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]) is bad. 2024-11-14T06:46:30,794 WARN [Thread-1028 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741894_1078 2024-11-14T06:46:30,794 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:34716 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741894_1078] {}] datanode.BlockReceiver(316): Block 1073741894 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T06:46:30,794 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:34716 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741894_1078] {}] datanode.DataXceiver(331): 127.0.0.1:46121:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34716 dst: /127.0.0.1:46121 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:30,795 WARN [Thread-1028 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK] 2024-11-14T06:46:30,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46121 is added to blk_1073741895_1079 (size=8190) 2024-11-14T06:46:30,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741895_1079 (size=8190) 2024-11-14T06:46:30,800 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/314959b91b6f4e72a5f0ad5b96498c6e 2024-11-14T06:46:30,808 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/314959b91b6f4e72a5f0ad5b96498c6e as hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/314959b91b6f4e72a5f0ad5b96498c6e 2024-11-14T06:46:30,816 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/314959b91b6f4e72a5f0ad5b96498c6e, entries=3, sequenceid=66, filesize=8.0 K 2024-11-14T06:46:30,817 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for 4c50aa91d588b8b257a9bbc495433f09 in 35ms, sequenceid=66, compaction requested=false 2024-11-14T06:46:30,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4c50aa91d588b8b257a9bbc495433f09: 2024-11-14T06:46:30,818 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-14T06:46:30,818 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:46:30,818 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/90938c37c6f743c8aab9082f83a300d6 because midkey is the same as first or last row 2024-11-14T06:46:31,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39105 {}] regionserver.HRegion(8855): Flush requested on 4c50aa91d588b8b257a9bbc495433f09 2024-11-14T06:46:31,010 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4c50aa91d588b8b257a9bbc495433f09 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-14T06:46:31,018 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/021b162ae3a94b3ca7e435ef2c7f3126 is 1080, key is row0015/info:/1731566790783/Put/seqid=0 2024-11-14T06:46:31,022 WARN [Thread-1037 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1080 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40987 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:31,022 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:34738 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741896_1080] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data4]'}, localName='127.0.0.1:46121', datanodeUuid='ff1689b7-6834-4e0f-9cff-2acd1fab3999', xmitsInProgress=0}:Exception transferring block BP-1678146812-172.17.0.2-1731566752608:blk_1073741896_1080 to mirror 127.0.0.1:40987 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:31,022 WARN [Thread-1037 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741896_1080 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46121,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK], DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]) is bad. 2024-11-14T06:46:31,022 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:34738 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741896_1080] {}] datanode.BlockReceiver(316): Block 1073741896 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T06:46:31,022 WARN [Thread-1037 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741896_1080 2024-11-14T06:46:31,022 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:34738 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741896_1080] {}] datanode.DataXceiver(331): 127.0.0.1:46121:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34738 dst: /127.0.0.1:46121 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:31,023 WARN [Thread-1037 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK] 2024-11-14T06:46:31,025 WARN [Thread-1037 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1081 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:31,025 WARN [Thread-1037 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741897_1081 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK], DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]) is bad. 2024-11-14T06:46:31,025 WARN [Thread-1037 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741897_1081 2024-11-14T06:46:31,025 WARN [Thread-1037 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK] 2024-11-14T06:46:31,026 WARN [Thread-1037 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1082 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:31,026 WARN [Thread-1037 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741898_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK], DatanodeInfoWithStorage[127.0.0.1:46121,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]) is bad. 
2024-11-14T06:46:31,027 WARN [Thread-1037 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741898_1082 2024-11-14T06:46:31,027 WARN [Thread-1037 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK] 2024-11-14T06:46:31,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46121 is added to blk_1073741899_1083 (size=14660) 2024-11-14T06:46:31,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741899_1083 (size=14660) 2024-11-14T06:46:31,032 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/021b162ae3a94b3ca7e435ef2c7f3126 2024-11-14T06:46:31,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/021b162ae3a94b3ca7e435ef2c7f3126 as hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/021b162ae3a94b3ca7e435ef2c7f3126 2024-11-14T06:46:31,039 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-14T06:46:31,039 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T06:46:31,044 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/021b162ae3a94b3ca7e435ef2c7f3126, entries=9, sequenceid=79, filesize=14.3 K 2024-11-14T06:46:31,045 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10758, heapSize ~11.48 KB/11760, currentSize=0 B/0 for 4c50aa91d588b8b257a9bbc495433f09 in 36ms, sequenceid=79, compaction requested=true 2024-11-14T06:46:31,045 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4c50aa91d588b8b257a9bbc495433f09: 2024-11-14T06:46:31,045 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.0 K, sizeToCheck=16.0 K 2024-11-14T06:46:31,045 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:46:31,045 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/90938c37c6f743c8aab9082f83a300d6 because midkey is the same as first or last row 2024-11-14T06:46:31,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4c50aa91d588b8b257a9bbc495433f09:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T06:46:31,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:46:31,045 DEBUG [RS:0;20680646cf8a:39105-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T06:46:31,047 DEBUG [RS:0;20680646cf8a:39105-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40947 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T06:46:31,047 DEBUG [RS:0;20680646cf8a:39105-longCompactions-0 {}] regionserver.HStore(1541): 4c50aa91d588b8b257a9bbc495433f09/info is initiating minor compaction (all files) 2024-11-14T06:46:31,047 INFO [RS:0;20680646cf8a:39105-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4c50aa91d588b8b257a9bbc495433f09/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09. 
2024-11-14T06:46:31,047 INFO [RS:0;20680646cf8a:39105-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/90938c37c6f743c8aab9082f83a300d6, hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/314959b91b6f4e72a5f0ad5b96498c6e, hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/021b162ae3a94b3ca7e435ef2c7f3126] into tmpdir=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp, totalSize=40.0 K 2024-11-14T06:46:31,047 DEBUG [RS:0;20680646cf8a:39105-longCompactions-0 {}] compactions.Compactor(225): Compacting 90938c37c6f743c8aab9082f83a300d6, keycount=12, bloomtype=ROW, size=17.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731566766980 2024-11-14T06:46:31,048 DEBUG [RS:0;20680646cf8a:39105-longCompactions-0 {}] compactions.Compactor(225): Compacting 314959b91b6f4e72a5f0ad5b96498c6e, keycount=3, bloomtype=ROW, size=8.0 K, encoding=NONE, compression=NONE, seqNum=66, earliestPutTs=1731566776928 2024-11-14T06:46:31,048 DEBUG [RS:0;20680646cf8a:39105-longCompactions-0 {}] compactions.Compactor(225): Compacting 021b162ae3a94b3ca7e435ef2c7f3126, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731566790783 2024-11-14T06:46:31,061 INFO [RS:0;20680646cf8a:39105-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4c50aa91d588b8b257a9bbc495433f09#info#compaction#27 average throughput is 11.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T06:46:31,061 DEBUG [RS:0;20680646cf8a:39105-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/37a121ef881c40419487945f0ff00025 is 1080, key is row0002/info:/1731566766980/Put/seqid=0 2024-11-14T06:46:31,063 WARN [Thread-1045 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1084 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T06:46:31,063 WARN [Thread-1045 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741900_1084 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK], DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]) is bad. 2024-11-14T06:46:31,064 WARN [Thread-1045 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741900_1084 2024-11-14T06:46:31,064 WARN [Thread-1045 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK] 2024-11-14T06:46:31,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46121 is added to blk_1073741901_1085 (size=28989) 2024-11-14T06:46:31,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741901_1085 (size=28989) 2024-11-14T06:46:31,075 DEBUG [RS:0;20680646cf8a:39105-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/.tmp/info/37a121ef881c40419487945f0ff00025 as hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/37a121ef881c40419487945f0ff00025 2024-11-14T06:46:31,082 INFO [RS:0;20680646cf8a:39105-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4c50aa91d588b8b257a9bbc495433f09/info of 4c50aa91d588b8b257a9bbc495433f09 into 37a121ef881c40419487945f0ff00025(size=28.3 K), total size for store is 28.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-14T06:46:31,082 DEBUG [RS:0;20680646cf8a:39105-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4c50aa91d588b8b257a9bbc495433f09: 2024-11-14T06:46:31,082 INFO [RS:0;20680646cf8a:39105-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09., storeName=4c50aa91d588b8b257a9bbc495433f09/info, priority=13, startTime=1731566791045; duration=0sec 2024-11-14T06:46:31,082 DEBUG [RS:0;20680646cf8a:39105-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-14T06:46:31,082 DEBUG [RS:0;20680646cf8a:39105-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:46:31,082 DEBUG [RS:0;20680646cf8a:39105-longCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/37a121ef881c40419487945f0ff00025 because midkey is the same as first or last row 2024-11-14T06:46:31,082 DEBUG [RS:0;20680646cf8a:39105-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-14T06:46:31,082 DEBUG [RS:0;20680646cf8a:39105-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:46:31,082 DEBUG [RS:0;20680646cf8a:39105-longCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/37a121ef881c40419487945f0ff00025 because midkey is the same as first or last row 2024-11-14T06:46:31,082 DEBUG [RS:0;20680646cf8a:39105-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-14T06:46:31,082 DEBUG [RS:0;20680646cf8a:39105-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:46:31,082 DEBUG [RS:0;20680646cf8a:39105-longCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/37a121ef881c40419487945f0ff00025 because midkey is the same as first or last row 2024-11-14T06:46:31,082 DEBUG [RS:0;20680646cf8a:39105-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:46:31,082 DEBUG [RS:0;20680646cf8a:39105-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4c50aa91d588b8b257a9bbc495433f09:info 2024-11-14T06:46:31,181 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.1731566775012 to hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/oldWALs/20680646cf8a%2C39105%2C1731566753272.1731566775012 
2024-11-14T06:46:31,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T06:46:31,211 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T06:46:31,211 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T06:46:31,211 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:46:31,212 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-11-14T06:46:31,212 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-14T06:46:31,212 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T06:46:31,212 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=924258338, stopped=false 2024-11-14T06:46:31,212 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=20680646cf8a,38495,1731566753222 2024-11-14T06:46:31,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T06:46:31,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1003cfb32220001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T06:46:31,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37019-0x1003cfb32220002, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T06:46:31,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:46:31,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1003cfb32220001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:46:31,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37019-0x1003cfb32220002, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:46:31,214 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T06:46:31,214 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-14T06:46:31,215 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T06:46:31,215 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:46:31,215 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '20680646cf8a,39105,1731566753272' ***** 2024-11-14T06:46:31,215 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37019-0x1003cfb32220002, quorum=127.0.0.1:50335, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:46:31,215 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T06:46:31,215 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '20680646cf8a,37019,1731566754231' ***** 2024-11-14T06:46:31,215 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39105-0x1003cfb32220001, quorum=127.0.0.1:50335, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:46:31,215 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T06:46:31,216 INFO [RS:0;20680646cf8a:39105 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T06:46:31,216 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:46:31,216 INFO [RS:0;20680646cf8a:39105 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T06:46:31,216 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T06:46:31,216 INFO [RS:0;20680646cf8a:39105 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T06:46:31,216 INFO [RS:0;20680646cf8a:39105 {}] regionserver.HRegionServer(3091): Received CLOSE for 4c50aa91d588b8b257a9bbc495433f09 2024-11-14T06:46:31,217 INFO [RS:1;20680646cf8a:37019 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T06:46:31,217 INFO [RS:0;20680646cf8a:39105 {}] regionserver.HRegionServer(959): stopping server 20680646cf8a,39105,1731566753272 2024-11-14T06:46:31,217 INFO [RS:1;20680646cf8a:37019 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T06:46:31,217 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T06:46:31,217 INFO [RS:0;20680646cf8a:39105 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T06:46:31,217 INFO [RS:1;20680646cf8a:37019 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T06:46:31,217 INFO [RS:1;20680646cf8a:37019 {}] regionserver.HRegionServer(959): stopping server 20680646cf8a,37019,1731566754231 2024-11-14T06:46:31,217 INFO [RS:1;20680646cf8a:37019 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T06:46:31,217 INFO [RS:0;20680646cf8a:39105 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;20680646cf8a:39105. 2024-11-14T06:46:31,217 INFO [RS:1;20680646cf8a:37019 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;20680646cf8a:37019. 
2024-11-14T06:46:31,218 DEBUG [RS:0;20680646cf8a:39105 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T06:46:31,218 DEBUG [RS:1;20680646cf8a:37019 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T06:46:31,218 DEBUG [RS:1;20680646cf8a:37019 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:46:31,218 DEBUG [RS:0;20680646cf8a:39105 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:46:31,218 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4c50aa91d588b8b257a9bbc495433f09, disabling compactions & flushes 2024-11-14T06:46:31,218 INFO [RS:1;20680646cf8a:37019 {}] regionserver.HRegionServer(976): stopping server 20680646cf8a,37019,1731566754231; all regions closed. 2024-11-14T06:46:31,218 INFO [RS:0;20680646cf8a:39105 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-14T06:46:31,218 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09. 2024-11-14T06:46:31,218 INFO [RS:0;20680646cf8a:39105 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T06:46:31,218 INFO [RS:0;20680646cf8a:39105 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-14T06:46:31,218 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09. 2024-11-14T06:46:31,218 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09. after waiting 0 ms 2024-11-14T06:46:31,218 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09. 2024-11-14T06:46:31,218 INFO [RS:0;20680646cf8a:39105 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T06:46:31,218 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:31,219 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:31,219 INFO [RS:0;20680646cf8a:39105 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-14T06:46:31,219 DEBUG [RS:0;20680646cf8a:39105 {}] regionserver.HRegionServer(1325): Online Regions={4c50aa91d588b8b257a9bbc495433f09=TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09., 1588230740=hbase:meta,,1.1588230740} 2024-11-14T06:46:31,219 DEBUG [RS:0;20680646cf8a:39105 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 4c50aa91d588b8b257a9bbc495433f09 2024-11-14T06:46:31,219 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T06:46:31,219 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:31,219 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/3690e7cf64274d0681b32834c73475e5, hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/01c8672b52f042089e5629da9f864cce, hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/f974ae895eae4e1f9500941b8b0872b8, hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/d5e8b7c7450d45e28ec3e10333b5e0bd, 
hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/1ad395ed7ea9474ead9e9cd7e6584e72, hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/90938c37c6f743c8aab9082f83a300d6, hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/80fc92c800d24bb0946e0ceea452e155, hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/314959b91b6f4e72a5f0ad5b96498c6e, hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/021b162ae3a94b3ca7e435ef2c7f3126] to archive 2024-11-14T06:46:31,219 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T06:46:31,219 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:31,219 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T06:46:31,219 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:31,219 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T06:46:31,220 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T06:46:31,220 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-14T06:46:31,220 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:31,220 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:31,220 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 2024-11-14T06:46:31,220 ERROR [FSHLog-0-hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9-prefix:20680646cf8a,39105,1731566753272.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:31,220 WARN [FSHLog-0-hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9-prefix:20680646cf8a,39105,1731566753272.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:31,221 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 20680646cf8a%2C39105%2C1731566753272.meta:.meta(num 1731566754094) roll requested 2024-11-14T06:46:31,221 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-14T06:46:31,221 INFO [regionserver/20680646cf8a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C39105%2C1731566753272.meta.1731566791221.meta 2024-11-14T06:46:31,225 WARN [IPC Server handler 2 on default port 33995 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 has not been closed. Lease recovery is in progress. 
RecoveryId = 1086 for block blk_1073741837_1013 2024-11-14T06:46:31,225 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 after 5ms 2024-11-14T06:46:31,226 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/3690e7cf64274d0681b32834c73475e5 to hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/3690e7cf64274d0681b32834c73475e5 2024-11-14T06:46:31,228 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/01c8672b52f042089e5629da9f864cce to hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/01c8672b52f042089e5629da9f864cce 2024-11-14T06:46:31,229 WARN [Thread-1051 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741902_1087 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40987 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:31,229 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:42730 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741902_1087] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data6]'}, localName='127.0.0.1:35733', datanodeUuid='92e56133-38bc-451b-a9b6-c6dd482e646f', xmitsInProgress=0}:Exception transferring block BP-1678146812-172.17.0.2-1731566752608:blk_1073741902_1087 to mirror 127.0.0.1:40987 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:31,229 WARN [Thread-1051 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741902_1087 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK], DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]) is bad. 2024-11-14T06:46:31,229 WARN [Thread-1051 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741902_1087 2024-11-14T06:46:31,229 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:42730 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741902_1087] {}] datanode.BlockReceiver(316): Block 1073741902 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-14T06:46:31,230 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/f974ae895eae4e1f9500941b8b0872b8 to hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/f974ae895eae4e1f9500941b8b0872b8 2024-11-14T06:46:31,230 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:42730 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741902_1087] {}] datanode.DataXceiver(331): 127.0.0.1:35733:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42730 dst: /127.0.0.1:35733 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:31,230 WARN [Thread-1051 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK] 2024-11-14T06:46:31,231 WARN [Thread-1051 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741903_1088 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:31,231 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/d5e8b7c7450d45e28ec3e10333b5e0bd to hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/d5e8b7c7450d45e28ec3e10333b5e0bd 2024-11-14T06:46:31,231 WARN [Thread-1051 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741903_1088 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK], DatanodeInfoWithStorage[127.0.0.1:46121,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]) is bad. 
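[Note] The "Error Recovery ... datanode ... is bad", "Abandoning block" and "Excluding datanode" warnings above are the HDFS client's standard write-pipeline recovery after datanodes have gone away mid-test. That behaviour is controlled by the client-side replace-datanode-on-failure settings; a hedged sketch of how a test or client might configure them (the values shown are illustrative, not read from this log):

    import org.apache.hadoop.conf.Configuration;

    public final class PipelineRecoverySketch {
      static Configuration dfsClientConf() {
        Configuration conf = new Configuration();
        // Allow the client to swap in a replacement datanode when one in the
        // pipeline fails (the replacement search is what logs "Excluding datanode").
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // DEFAULT only replaces when the pipeline would shrink below a safe size.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // With best-effort, keep writing on the surviving datanodes if no
        // replacement can be found instead of failing the write outright.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
      }
    }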
2024-11-14T06:46:31,231 WARN [Thread-1051 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741903_1088 2024-11-14T06:46:31,232 WARN [Thread-1051 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK] 2024-11-14T06:46:31,232 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/1ad395ed7ea9474ead9e9cd7e6584e72 to hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/1ad395ed7ea9474ead9e9cd7e6584e72 2024-11-14T06:46:31,234 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/90938c37c6f743c8aab9082f83a300d6 to hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/90938c37c6f743c8aab9082f83a300d6 2024-11-14T06:46:31,235 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/80fc92c800d24bb0946e0ceea452e155 to hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/80fc92c800d24bb0946e0ceea452e155 2024-11-14T06:46:31,235 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:31,235 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:31,236 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:31,236 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:31,236 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:31,236 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566791221.meta 2024-11-14T06:46:31,236 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:31,236 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/314959b91b6f4e72a5f0ad5b96498c6e to hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/314959b91b6f4e72a5f0ad5b96498c6e 2024-11-14T06:46:31,236 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:31,236 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta 2024-11-14T06:46:31,237 WARN [IPC Server handler 0 on default port 33995 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1090 for block blk_1073741834_1010 2024-11-14T06:46:31,237 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta after 1ms 2024-11-14T06:46:31,237 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/021b162ae3a94b3ca7e435ef2c7f3126 to hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/info/021b162ae3a94b3ca7e435ef2c7f3126 2024-11-14T06:46:31,238 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=20680646cf8a:38495 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-14T06:46:31,238 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [3690e7cf64274d0681b32834c73475e5=10347, 01c8672b52f042089e5629da9f864cce=12506, f974ae895eae4e1f9500941b8b0872b8=17994, d5e8b7c7450d45e28ec3e10333b5e0bd=6027, 1ad395ed7ea9474ead9e9cd7e6584e72=6027, 90938c37c6f743c8aab9082f83a300d6=18097, 80fc92c800d24bb0946e0ceea452e155=6027, 314959b91b6f4e72a5f0ad5b96498c6e=8190, 021b162ae3a94b3ca7e435ef2c7f3126=14660] 2024-11-14T06:46:31,241 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37355:37355),(127.0.0.1/127.0.0.1:37411:37411)] 2024-11-14T06:46:31,241 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta is not closed yet, will try archiving it next time 2024-11-14T06:46:31,245 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4c50aa91d588b8b257a9bbc495433f09/recovered.edits/83.seqid, newMaxSeqId=83, maxSeqId=1 2024-11-14T06:46:31,246 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09. 2024-11-14T06:46:31,246 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4c50aa91d588b8b257a9bbc495433f09: Waiting for close lock at 1731566791218Running coprocessor pre-close hooks at 1731566791218Disabling compacts and flushes for region at 1731566791218Disabling writes for close at 1731566791218Writing region close event to WAL at 1731566791241 (+23 ms)Running coprocessor post-close hooks at 1731566791246 (+5 ms)Closed at 1731566791246 2024-11-14T06:46:31,246 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09. 2024-11-14T06:46:31,256 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/hbase/meta/1588230740/.tmp/info/856f5289806a4e50bc59c602844172e9 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731566754346.4c50aa91d588b8b257a9bbc495433f09./info:regioninfo/1731566754716/Put/seqid=0 2024-11-14T06:46:31,257 WARN [Thread-1058 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741905_1091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:31,258 WARN [Thread-1058 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741905_1091 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK], DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]) is bad. 2024-11-14T06:46:31,258 WARN [Thread-1058 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741905_1091 2024-11-14T06:46:31,258 WARN [Thread-1058 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK] 2024-11-14T06:46:31,260 WARN [Thread-1058 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741906_1092 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45879 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:31,260 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:42756 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741906_1092] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data6]'}, localName='127.0.0.1:35733', datanodeUuid='92e56133-38bc-451b-a9b6-c6dd482e646f', xmitsInProgress=0}:Exception transferring block BP-1678146812-172.17.0.2-1731566752608:blk_1073741906_1092 to mirror 127.0.0.1:45879 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:31,261 WARN [Thread-1058 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741906_1092 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35733,DS-9d414d48-5dd3-4bdb-a1b3-ebac272af800,DISK], DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]) is bad. 2024-11-14T06:46:31,261 WARN [Thread-1058 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741906_1092 2024-11-14T06:46:31,261 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:42756 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741906_1092] {}] datanode.BlockReceiver(316): Block 1073741906 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T06:46:31,261 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:42756 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741906_1092] {}] datanode.DataXceiver(331): 127.0.0.1:35733:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42756 dst: /127.0.0.1:35733 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
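[Note] The RecoverLeaseFSUtils messages earlier in this shutdown ("Recover lease on dfs file ...", "Failed to recover lease, attempt=0 ...", and the NameNode's "Lease recovery is in progress") come from the WAL close path asking the NameNode to release the lease on a WAL file that was never cleanly closed. A minimal sketch of that retry loop using the public DistributedFileSystem API; the fixed back-off is illustrative (the real utility uses a bounded, escalating wait):

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
      static void recoverLease(DistributedFileSystem dfs, Path wal)
          throws IOException, InterruptedException {
        // recoverLease() returns true once the NameNode has closed the file and
        // released the lease; while recovery is still in progress it returns false,
        // matching the "Failed to recover lease, attempt=N" lines in this log.
        while (!dfs.recoverLease(wal)) {
          Thread.sleep(1000L); // simple fixed back-off between attempts
        }
      }
    }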
2024-11-14T06:46:31,261 WARN [Thread-1058 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK] 2024-11-14T06:46:31,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741907_1093 (size=7089) 2024-11-14T06:46:31,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46121 is added to blk_1073741907_1093 (size=7089) 2024-11-14T06:46:31,267 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/hbase/meta/1588230740/.tmp/info/856f5289806a4e50bc59c602844172e9 2024-11-14T06:46:31,287 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/hbase/meta/1588230740/.tmp/ns/7372a3a5d09d46d1a89de8516639f8f5 is 43, key is default/ns:d/1731566754165/Put/seqid=0 2024-11-14T06:46:31,289 WARN [Thread-1066 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741908_1094 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45879 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:31,289 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:34790 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741908_1094] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data4]'}, localName='127.0.0.1:46121', datanodeUuid='ff1689b7-6834-4e0f-9cff-2acd1fab3999', xmitsInProgress=0}:Exception transferring block BP-1678146812-172.17.0.2-1731566752608:blk_1073741908_1094 to mirror 127.0.0.1:45879 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:31,289 WARN [Thread-1066 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741908_1094 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46121,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK], DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]) is bad. 2024-11-14T06:46:31,290 WARN [Thread-1066 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741908_1094 2024-11-14T06:46:31,290 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:34790 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741908_1094] {}] datanode.BlockReceiver(316): Block 1073741908 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T06:46:31,290 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:34790 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741908_1094] {}] datanode.DataXceiver(331): 127.0.0.1:46121:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34790 dst: /127.0.0.1:46121 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:31,290 WARN [Thread-1066 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK] 2024-11-14T06:46:31,292 WARN [Thread-1066 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741909_1095 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40987 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:31,292 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:34794 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741909_1095] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data4]'}, localName='127.0.0.1:46121', datanodeUuid='ff1689b7-6834-4e0f-9cff-2acd1fab3999', xmitsInProgress=0}:Exception transferring block BP-1678146812-172.17.0.2-1731566752608:blk_1073741909_1095 to mirror 127.0.0.1:40987 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:31,292 WARN [Thread-1066 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741909_1095 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46121,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK], DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]) is bad. 2024-11-14T06:46:31,292 WARN [Thread-1066 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741909_1095 2024-11-14T06:46:31,292 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:34794 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741909_1095] {}] datanode.BlockReceiver(316): Block 1073741909 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T06:46:31,292 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:34794 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741909_1095] {}] datanode.DataXceiver(331): 127.0.0.1:46121:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34794 dst: /127.0.0.1:46121 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:31,293 WARN [Thread-1066 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK] 2024-11-14T06:46:31,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46121 is added to blk_1073741910_1096 (size=5153) 2024-11-14T06:46:31,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741910_1096 (size=5153) 2024-11-14T06:46:31,298 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/hbase/meta/1588230740/.tmp/ns/7372a3a5d09d46d1a89de8516639f8f5 2024-11-14T06:46:31,316 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/hbase/meta/1588230740/.tmp/table/3a595acb715c4723a32391ed707d0886 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731566754728/Put/seqid=0 2024-11-14T06:46:31,318 WARN [Thread-1074 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741911_1097 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T06:46:31,318 WARN [Thread-1074 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741911_1097 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK], DatanodeInfoWithStorage[127.0.0.1:46121,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK]) is bad. 2024-11-14T06:46:31,318 WARN [Thread-1074 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741911_1097 2024-11-14T06:46:31,319 WARN [Thread-1074 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36149,DS-6d8175bd-2995-46c7-bac3-4065a1e8b823,DISK] 2024-11-14T06:46:31,320 WARN [Thread-1074 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741912_1098 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45879 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:31,320 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:34824 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741912_1098] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data4]'}, localName='127.0.0.1:46121', datanodeUuid='ff1689b7-6834-4e0f-9cff-2acd1fab3999', xmitsInProgress=0}:Exception transferring block BP-1678146812-172.17.0.2-1731566752608:blk_1073741912_1098 to mirror 127.0.0.1:45879 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T06:46:31,321 WARN [Thread-1074 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741912_1098 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46121,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK], DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK]) is bad. 2024-11-14T06:46:31,321 WARN [Thread-1074 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741912_1098 2024-11-14T06:46:31,321 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:34824 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741912_1098] {}] datanode.BlockReceiver(316): Block 1073741912 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T06:46:31,321 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:34824 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741912_1098] {}] datanode.DataXceiver(331): 127.0.0.1:46121:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34824 dst: /127.0.0.1:46121 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:31,321 WARN [Thread-1074 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45879,DS-c73e235d-0240-4454-afa5-0e0d9a19b6c4,DISK] 2024-11-14T06:46:31,323 WARN [Thread-1074 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741913_1099 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40987 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T06:46:31,323 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:34838 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741913_1099] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data4]'}, localName='127.0.0.1:46121', datanodeUuid='ff1689b7-6834-4e0f-9cff-2acd1fab3999', xmitsInProgress=0}:Exception transferring block BP-1678146812-172.17.0.2-1731566752608:blk_1073741913_1099 to mirror 127.0.0.1:40987 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:31,323 WARN [Thread-1074 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1678146812-172.17.0.2-1731566752608:blk_1073741913_1099 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46121,DS-230efee1-675c-4a81-b43c-66956f7e849f,DISK], DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK]) is bad. 2024-11-14T06:46:31,323 WARN [Thread-1074 {}] hdfs.DataStreamer(1850): Abandoning BP-1678146812-172.17.0.2-1731566752608:blk_1073741913_1099 2024-11-14T06:46:31,323 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:34838 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741913_1099] {}] datanode.BlockReceiver(316): Block 1073741913 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T06:46:31,324 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-597092649_22 at /127.0.0.1:34838 [Receiving block BP-1678146812-172.17.0.2-1731566752608:blk_1073741913_1099] {}] datanode.DataXceiver(331): 127.0.0.1:46121:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34838 dst: /127.0.0.1:46121 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:31,324 WARN [Thread-1074 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40987,DS-5415e41b-ebdb-4ff1-838b-53415f043d44,DISK] 2024-11-14T06:46:31,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741914_1100 (size=5424) 2024-11-14T06:46:31,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46121 is added to blk_1073741914_1100 (size=5424) 2024-11-14T06:46:31,329 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/hbase/meta/1588230740/.tmp/table/3a595acb715c4723a32391ed707d0886 2024-11-14T06:46:31,335 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/hbase/meta/1588230740/.tmp/info/856f5289806a4e50bc59c602844172e9 as hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/hbase/meta/1588230740/info/856f5289806a4e50bc59c602844172e9 2024-11-14T06:46:31,340 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/hbase/meta/1588230740/info/856f5289806a4e50bc59c602844172e9, entries=10, sequenceid=11, filesize=6.9 K 2024-11-14T06:46:31,341 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/hbase/meta/1588230740/.tmp/ns/7372a3a5d09d46d1a89de8516639f8f5 as hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/hbase/meta/1588230740/ns/7372a3a5d09d46d1a89de8516639f8f5 2024-11-14T06:46:31,347 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/hbase/meta/1588230740/ns/7372a3a5d09d46d1a89de8516639f8f5, entries=2, sequenceid=11, filesize=5.0 K 2024-11-14T06:46:31,348 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/hbase/meta/1588230740/.tmp/table/3a595acb715c4723a32391ed707d0886 as hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/hbase/meta/1588230740/table/3a595acb715c4723a32391ed707d0886 
2024-11-14T06:46:31,350 INFO [regionserver/20680646cf8a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-14T06:46:31,350 INFO [regionserver/20680646cf8a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-14T06:46:31,353 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/hbase/meta/1588230740/table/3a595acb715c4723a32391ed707d0886, entries=2, sequenceid=11, filesize=5.3 K 2024-11-14T06:46:31,354 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 134ms, sequenceid=11, compaction requested=false 2024-11-14T06:46:31,359 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-14T06:46:31,359 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T06:46:31,360 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T06:46:31,360 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731566791219Running coprocessor pre-close hooks at 1731566791219Disabling compacts and flushes for region at 1731566791219Disabling writes for close at 1731566791219Obtaining lock to block concurrent updates at 1731566791220 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731566791220Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731566791220Flushing stores of hbase:meta,,1.1588230740 at 1731566791242 (+22 ms)Flushing 1588230740/info: creating writer at 1731566791242Flushing 1588230740/info: appending metadata at 1731566791255 (+13 ms)Flushing 1588230740/info: closing flushed file at 1731566791255Flushing 1588230740/ns: creating writer at 1731566791273 (+18 ms)Flushing 1588230740/ns: appending metadata at 1731566791286 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1731566791286Flushing 1588230740/table: creating writer at 1731566791303 (+17 ms)Flushing 1588230740/table: appending metadata at 1731566791316 (+13 ms)Flushing 1588230740/table: closing flushed file at 1731566791316Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5d61ffc3: reopening flushed file at 1731566791334 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1d1be0ea: reopening flushed file at 1731566791340 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@608962e3: reopening flushed file at 1731566791347 (+7 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 134ms, sequenceid=11, compaction requested=false at 1731566791354 (+7 ms)Writing region close event to WAL at 1731566791355 (+1 ms)Running coprocessor 
post-close hooks at 1731566791359 (+4 ms)Closed at 1731566791360 (+1 ms) 2024-11-14T06:46:31,360 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T06:46:31,419 INFO [RS:0;20680646cf8a:39105 {}] regionserver.HRegionServer(976): stopping server 20680646cf8a,39105,1731566753272; all regions closed. 2024-11-14T06:46:31,420 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:31,420 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:31,420 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:31,420 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:31,421 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:31,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741904_1089 (size=825) 2024-11-14T06:46:31,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46121 is added to blk_1073741904_1089 (size=825) 2024-11-14T06:46:31,528 INFO [regionserver/20680646cf8a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-14T06:46:31,528 INFO [regionserver/20680646cf8a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-14T06:46:31,530 INFO [regionserver/20680646cf8a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T06:46:32,307 INFO [regionserver/20680646cf8a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T06:46:32,623 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@aef2d4f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35733, datanodeUuid=92e56133-38bc-451b-a9b6-c6dd482e646f, infoPort=37411, infoSecurePort=0, ipcPort=43239, storageInfo=lv=-57;cid=testClusterID;nsid=1791374989;c=1731566752608):Failed to transfer BP-1678146812-172.17.0.2-1731566752608:blk_1073741876_1059 to 127.0.0.1:36149 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:34,198 INFO [master/20680646cf8a:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-14T06:46:34,198 INFO [master/20680646cf8a:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-14T06:46:34,528 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-14T06:46:34,531 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T06:46:34,531 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T06:46:35,227 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 after 4007ms 2024-11-14T06:46:35,239 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta after 4003ms 2024-11-14T06:46:35,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741836_1012 (size=76) 2024-11-14T06:46:35,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741832_1008 (size=32) 2024-11-14T06:46:36,220 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-14T06:46:36,227 DEBUG [RS:1;20680646cf8a:37019 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/oldWALs 2024-11-14T06:46:36,227 INFO [RS:1;20680646cf8a:37019 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 20680646cf8a%2C37019%2C1731566754231:(num 1731566754449) 2024-11-14T06:46:36,227 DEBUG [RS:1;20680646cf8a:37019 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:46:36,227 INFO [RS:1;20680646cf8a:37019 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T06:46:36,228 INFO [RS:1;20680646cf8a:37019 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T06:46:36,228 INFO [RS:1;20680646cf8a:37019 {}] hbase.ChoreService(370): Chore service for: regionserver/20680646cf8a:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-14T06:46:36,229 INFO [RS:1;20680646cf8a:37019 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T06:46:36,229 INFO [RS:1;20680646cf8a:37019 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T06:46:36,229 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T06:46:36,229 INFO [RS:1;20680646cf8a:37019 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-14T06:46:36,229 INFO [RS:1;20680646cf8a:37019 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T06:46:36,229 INFO [RS:1;20680646cf8a:37019 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37019 2024-11-14T06:46:36,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T06:46:36,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37019-0x1003cfb32220002, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/20680646cf8a,37019,1731566754231 2024-11-14T06:46:36,232 INFO [RS:1;20680646cf8a:37019 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T06:46:36,232 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [20680646cf8a,37019,1731566754231] 2024-11-14T06:46:36,233 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/20680646cf8a,37019,1731566754231 already deleted, retry=false 2024-11-14T06:46:36,233 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 20680646cf8a,37019,1731566754231 expired; onlineServers=1 2024-11-14T06:46:36,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:36,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:46:36,265 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:46:36,265 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:46:36,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:46:36,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:46:36,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:46:36,275 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:46:36,275 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:46:36,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741828_1004 (size=1189) 2024-11-14T06:46:36,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741826_1002 (size=42) 2024-11-14T06:46:36,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37019-0x1003cfb32220002, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:46:36,333 INFO [RS:1;20680646cf8a:37019 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T06:46:36,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37019-0x1003cfb32220002, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:46:36,333 INFO [RS:1;20680646cf8a:37019 {}] regionserver.HRegionServer(1031): Exiting; stopping=20680646cf8a,37019,1731566754231; zookeeper connection closed. 2024-11-14T06:46:36,334 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1f830fe1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1f830fe1 2024-11-14T06:46:36,421 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-14T06:46:36,431 DEBUG [RS:0;20680646cf8a:39105 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/oldWALs 2024-11-14T06:46:36,431 INFO [RS:0;20680646cf8a:39105 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 20680646cf8a%2C39105%2C1731566753272.meta:.meta(num 1731566791221) 2024-11-14T06:46:36,433 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:36,433 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:36,433 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:36,434 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:36,434 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:36,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46121 is added to blk_1073741891_1075 (size=16308) 2024-11-14T06:46:36,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741891_1075 (size=16308) 2024-11-14T06:46:36,439 DEBUG [RS:0;20680646cf8a:39105 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/oldWALs 2024-11-14T06:46:36,439 INFO [RS:0;20680646cf8a:39105 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 20680646cf8a%2C39105%2C1731566753272:(num 1731566790759) 2024-11-14T06:46:36,439 DEBUG [RS:0;20680646cf8a:39105 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:46:36,439 INFO [RS:0;20680646cf8a:39105 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T06:46:36,439 INFO [RS:0;20680646cf8a:39105 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T06:46:36,439 INFO [RS:0;20680646cf8a:39105 {}] hbase.ChoreService(370): Chore service 
for: regionserver/20680646cf8a:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-14T06:46:36,439 INFO [RS:0;20680646cf8a:39105 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T06:46:36,439 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T06:46:36,439 INFO [RS:0;20680646cf8a:39105 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39105 2024-11-14T06:46:36,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1003cfb32220001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/20680646cf8a,39105,1731566753272 2024-11-14T06:46:36,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T06:46:36,441 INFO [RS:0;20680646cf8a:39105 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T06:46:36,441 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [20680646cf8a,39105,1731566753272] 2024-11-14T06:46:36,442 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/20680646cf8a,39105,1731566753272 already deleted, retry=false 2024-11-14T06:46:36,442 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 20680646cf8a,39105,1731566753272 expired; onlineServers=0 2024-11-14T06:46:36,442 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '20680646cf8a,38495,1731566753222' ***** 2024-11-14T06:46:36,442 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T06:46:36,442 INFO [M:0;20680646cf8a:38495 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T06:46:36,442 INFO [M:0;20680646cf8a:38495 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T06:46:36,443 DEBUG [M:0;20680646cf8a:38495 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T06:46:36,443 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-14T06:46:36,443 DEBUG [M:0;20680646cf8a:38495 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T06:46:36,443 DEBUG [master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.small.0-1731566753463 {}] cleaner.HFileCleaner(306): Exit Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.small.0-1731566753463,5,FailOnTimeoutGroup] 2024-11-14T06:46:36,443 DEBUG [master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.large.0-1731566753461 {}] cleaner.HFileCleaner(306): Exit Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.large.0-1731566753461,5,FailOnTimeoutGroup] 2024-11-14T06:46:36,443 INFO [M:0;20680646cf8a:38495 {}] hbase.ChoreService(370): Chore service for: master/20680646cf8a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T06:46:36,443 INFO [M:0;20680646cf8a:38495 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T06:46:36,443 DEBUG [M:0;20680646cf8a:38495 {}] master.HMaster(1795): Stopping service threads 2024-11-14T06:46:36,443 INFO [M:0;20680646cf8a:38495 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T06:46:36,443 INFO [M:0;20680646cf8a:38495 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T06:46:36,443 INFO [M:0;20680646cf8a:38495 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T06:46:36,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T06:46:36,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:46:36,444 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-14T06:46:36,444 DEBUG [M:0;20680646cf8a:38495 {}] zookeeper.ZKUtil(347): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T06:46:36,444 WARN [M:0;20680646cf8a:38495 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T06:46:36,445 INFO [M:0;20680646cf8a:38495 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/.lastflushedseqids 2024-11-14T06:46:36,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741915_1101 (size=130) 2024-11-14T06:46:36,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46121 is added to blk_1073741915_1101 (size=130) 2024-11-14T06:46:36,451 INFO [M:0;20680646cf8a:38495 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T06:46:36,451 INFO [M:0;20680646cf8a:38495 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T06:46:36,451 DEBUG [M:0;20680646cf8a:38495 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T06:46:36,451 INFO [M:0;20680646cf8a:38495 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:46:36,452 DEBUG [M:0;20680646cf8a:38495 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:46:36,452 DEBUG [M:0;20680646cf8a:38495 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T06:46:36,452 DEBUG [M:0;20680646cf8a:38495 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T06:46:36,452 INFO [M:0;20680646cf8a:38495 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.26 KB heapSize=29.50 KB 2024-11-14T06:46:36,467 DEBUG [M:0;20680646cf8a:38495 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7f8504413e024a28b5e56f2b1229d624 is 82, key is hbase:meta,,1/info:regioninfo/1731566754147/Put/seqid=0 2024-11-14T06:46:36,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741916_1102 (size=5672) 2024-11-14T06:46:36,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46121 is added to blk_1073741916_1102 (size=5672) 2024-11-14T06:46:36,473 INFO [M:0;20680646cf8a:38495 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7f8504413e024a28b5e56f2b1229d624 2024-11-14T06:46:36,493 DEBUG [M:0;20680646cf8a:38495 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/eca559e73a70414c8ce57f352003e8a3 is 775, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731566754735/Put/seqid=0 2024-11-14T06:46:36,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741917_1103 (size=6256) 2024-11-14T06:46:36,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46121 is added to blk_1073741917_1103 (size=6256) 2024-11-14T06:46:36,498 INFO [M:0;20680646cf8a:38495 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.59 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/eca559e73a70414c8ce57f352003e8a3 2024-11-14T06:46:36,503 INFO [M:0;20680646cf8a:38495 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for eca559e73a70414c8ce57f352003e8a3 2024-11-14T06:46:36,517 DEBUG [M:0;20680646cf8a:38495 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ebde1c977aea4b0083d967696e071851 is 69, key is 20680646cf8a,37019,1731566754231/rs:state/1731566754292/Put/seqid=0 2024-11-14T06:46:36,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741918_1104 (size=5224) 2024-11-14T06:46:36,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46121 is added to blk_1073741918_1104 (size=5224) 2024-11-14T06:46:36,522 INFO [M:0;20680646cf8a:38495 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), 
to=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ebde1c977aea4b0083d967696e071851 2024-11-14T06:46:36,540 DEBUG [M:0;20680646cf8a:38495 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d7b9e2157b9849a0b039a44c6c41b4da is 52, key is load_balancer_on/state:d/1731566754211/Put/seqid=0 2024-11-14T06:46:36,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1003cfb32220001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:46:36,542 INFO [RS:0;20680646cf8a:39105 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T06:46:36,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1003cfb32220001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:46:36,542 INFO [RS:0;20680646cf8a:39105 {}] regionserver.HRegionServer(1031): Exiting; stopping=20680646cf8a,39105,1731566753272; zookeeper connection closed. 2024-11-14T06:46:36,542 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@52949f52 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@52949f52 2024-11-14T06:46:36,542 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-14T06:46:36,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741919_1105 (size=5056) 2024-11-14T06:46:36,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46121 is added to blk_1073741919_1105 (size=5056) 2024-11-14T06:46:36,546 INFO [M:0;20680646cf8a:38495 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d7b9e2157b9849a0b039a44c6c41b4da 2024-11-14T06:46:36,551 DEBUG [M:0;20680646cf8a:38495 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7f8504413e024a28b5e56f2b1229d624 as hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7f8504413e024a28b5e56f2b1229d624 2024-11-14T06:46:36,557 INFO [M:0;20680646cf8a:38495 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7f8504413e024a28b5e56f2b1229d624, entries=8, sequenceid=60, filesize=5.5 K 2024-11-14T06:46:36,558 DEBUG [M:0;20680646cf8a:38495 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/eca559e73a70414c8ce57f352003e8a3 as 
hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/eca559e73a70414c8ce57f352003e8a3 2024-11-14T06:46:36,564 INFO [M:0;20680646cf8a:38495 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for eca559e73a70414c8ce57f352003e8a3 2024-11-14T06:46:36,564 INFO [M:0;20680646cf8a:38495 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/eca559e73a70414c8ce57f352003e8a3, entries=6, sequenceid=60, filesize=6.1 K 2024-11-14T06:46:36,565 DEBUG [M:0;20680646cf8a:38495 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ebde1c977aea4b0083d967696e071851 as hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ebde1c977aea4b0083d967696e071851 2024-11-14T06:46:36,571 INFO [M:0;20680646cf8a:38495 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ebde1c977aea4b0083d967696e071851, entries=2, sequenceid=60, filesize=5.1 K 2024-11-14T06:46:36,572 DEBUG [M:0;20680646cf8a:38495 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d7b9e2157b9849a0b039a44c6c41b4da as hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d7b9e2157b9849a0b039a44c6c41b4da 2024-11-14T06:46:36,577 INFO [M:0;20680646cf8a:38495 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d7b9e2157b9849a0b039a44c6c41b4da, entries=1, sequenceid=60, filesize=4.9 K 2024-11-14T06:46:36,579 INFO [M:0;20680646cf8a:38495 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=60, compaction requested=false 2024-11-14T06:46:36,580 INFO [M:0;20680646cf8a:38495 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:46:36,580 DEBUG [M:0;20680646cf8a:38495 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731566796451Disabling compacts and flushes for region at 1731566796451Disabling writes for close at 1731566796452 (+1 ms)Obtaining lock to block concurrent updates at 1731566796452Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731566796452Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23817, getHeapSize=30144, getOffHeapSize=0, getCellsCount=71 at 1731566796453 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731566796453Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731566796453Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731566796467 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731566796467Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731566796477 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731566796492 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731566796492Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731566796503 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731566796516 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731566796516Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731566796527 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731566796540 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731566796540Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35d5024f: reopening flushed file at 1731566796550 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@213da61: reopening flushed file at 1731566796557 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5fa5493a: reopening flushed file at 1731566796564 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5826267b: reopening flushed file at 1731566796571 (+7 ms)Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=60, compaction requested=false at 1731566796579 (+8 ms)Writing region close event to WAL at 1731566796580 (+1 ms)Closed at 1731566796580 2024-11-14T06:46:36,581 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:36,581 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:36,581 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:36,581 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:36,581 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:36,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35733 is added to blk_1073741887_1070 (size=1045) 2024-11-14T06:46:36,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46121 is added to blk_1073741887_1070 (size=1045) 2024-11-14T06:46:36,779 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T06:46:36,800 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:46:36,800 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:46:36,801 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:46:36,801 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:46:36,801 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:46:36,805 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:46:36,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:46:36,808 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:46:37,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:46:37,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:37,345 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@336d6b53 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1678146812-172.17.0.2-1731566752608:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:36149,null,null]) java.net.ConnectException: Call From 20680646cf8a/172.17.0.2 to localhost:34791 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-14T06:46:37,488 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/WALs/20680646cf8a,38495,1731566753222/20680646cf8a%2C38495%2C1731566753222.1731566753373 to hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/oldWALs/20680646cf8a%2C38495%2C1731566753222.1731566753373 2024-11-14T06:46:37,492 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/MasterData/oldWALs/20680646cf8a%2C38495%2C1731566753222.1731566753373 to hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/oldWALs/20680646cf8a%2C38495%2C1731566753222.1731566753373$masterlocalwal$ 2024-11-14T06:46:37,492 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
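The flush entries above follow a write-to-temp-then-commit pattern: each column family's snapshot is written to an HFile under the store's .tmp/ directory, and once the writer is closed the file is renamed into the family directory (info/, proc/, rs/, state/) before the region close journal is recorded. A minimal sketch of that pattern against the Hadoop FileSystem API is shown below; the class and method names (TmpCommitSketch, flushAndCommit) are illustrative placeholders, not HBase's flush implementation.

    import java.io.IOException;

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative sketch only: write a flushed snapshot under .tmp/, then rename it into
    // the column-family directory, mirroring the "Flushed ... to=.../.tmp/<family>/<file>"
    // entries followed by "Committing .../.tmp/<family>/<file> as .../<family>/<file>" above.
    public class TmpCommitSketch {
        static Path flushAndCommit(FileSystem fs, Path storeDir, String family,
                                   String fileName, byte[] snapshot) throws IOException {
            Path tmpFile = new Path(new Path(storeDir, ".tmp/" + family), fileName);
            try (FSDataOutputStream out = fs.create(tmpFile, true)) {
                out.write(snapshot);                  // write the flushed contents
            }
            Path committed = new Path(new Path(storeDir, family), fileName);
            fs.mkdirs(committed.getParent());         // ensure the family directory exists
            if (!fs.rename(tmpFile, committed)) {     // move the finished file into the store
                throw new IOException("Failed to commit " + tmpFile + " as " + committed);
            }
            return committed;
        }
    }

One motivation for writing under .tmp/ first is that a crash mid-flush leaves only an orphaned temporary file rather than a half-written store file; the log above commits each file only after it has been fully flushed and its Bloom metadata loaded.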
2024-11-14T06:46:37,492 INFO [M:0;20680646cf8a:38495 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-14T06:46:37,492 INFO [M:0;20680646cf8a:38495 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38495 2024-11-14T06:46:37,493 INFO [M:0;20680646cf8a:38495 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T06:46:37,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:46:37,594 INFO [M:0;20680646cf8a:38495 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T06:46:37,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38495-0x1003cfb32220000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:46:37,599 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@47bbe019{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:46:37,600 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6f8ca33c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:46:37,600 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:46:37,601 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c1d8e25{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:46:37,601 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2852206a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/hadoop.log.dir/,STOPPED} 2024-11-14T06:46:37,604 WARN [BP-1678146812-172.17.0.2-1731566752608 heartbeating to localhost/127.0.0.1:33995 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T06:46:37,604 WARN [BP-1678146812-172.17.0.2-1731566752608 heartbeating to localhost/127.0.0.1:33995 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1678146812-172.17.0.2-1731566752608 (Datanode Uuid ff1689b7-6834-4e0f-9cff-2acd1fab3999) service to localhost/127.0.0.1:33995 2024-11-14T06:46:37,604 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T06:46:37,604 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T06:46:37,603 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4b50eabc {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1678146812-172.17.0.2-1731566752608:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:36149,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:34791 , LocalHost:localPort 20680646cf8a/172.17.0.2:0. 
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-14T06:46:37,605 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4b50eabc {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1678146812-172.17.0.2-1731566752608:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:46121,null,null], DatanodeInfoWithStorage[127.0.0.1:36149,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-1678146812-172.17.0.2-1731566752608 2024-11-14T06:46:37,605 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4b50eabc {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1678146812-172.17.0.2-1731566752608:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:36149,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1678146812-172.17.0.2-1731566752608 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:37,605 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4b50eabc {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1678146812-172.17.0.2-1731566752608:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:46121,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1678146812-172.17.0.2-1731566752608 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
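The InterruptedIOException entries above are raised while the IPC client sleeps between connection attempts under the retry policy named in the message, RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS); because the datanode is shutting down, the sleeping thread is interrupted and the recovery task gives up instead of exhausting its retries. Below is a minimal sketch of a fixed-count, fixed-sleep retry loop that surfaces interruption this way; FixedSleepRetrySketch and retryWithFixedSleep are illustrative names, not Hadoop's RetryPolicy API.

    import java.io.IOException;
    import java.io.InterruptedIOException;
    import java.util.concurrent.Callable;

    // Illustrative sketch: retry an action up to maxRetries times with a fixed sleep,
    // surfacing interruption as InterruptedIOException the way the shutdown above does.
    public class FixedSleepRetrySketch {
        static <T> T retryWithFixedSleep(Callable<T> action, int maxRetries, long sleepMillis)
                throws IOException {
            IOException last = new IOException("no attempts made");
            for (int attempt = 0; attempt <= maxRetries; attempt++) {
                try {
                    return action.call();
                } catch (IOException ioe) {
                    last = ioe;                               // remember the latest failure
                    try {
                        Thread.sleep(sleepMillis);            // fixed back-off between attempts
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();   // preserve the interrupt status
                        InterruptedIOException iioe =
                            new InterruptedIOException("Interrupted while waiting to retry");
                        iioe.initCause(ie);
                        throw iioe;                           // abandon recovery, as in the log
                    }
                } catch (Exception e) {
                    throw new IOException(e);                 // non-IO failures are not retried
                }
            }
            throw last;                                       // retries exhausted
        }
    }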
2024-11-14T06:46:37,605 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data3/current/BP-1678146812-172.17.0.2-1731566752608 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:46:37,605 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4b50eabc {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1678146812-172.17.0.2-1731566752608:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:36149,null,null], DatanodeInfoWithStorage[127.0.0.1:46121,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1678146812-172.17.0.2-1731566752608:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:36149,null,null], DatanodeInfoWithStorage[127.0.0.1:46121,null,null]] 2024-11-14T06:46:37,606 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data4/current/BP-1678146812-172.17.0.2-1731566752608 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:46:37,606 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T06:46:37,608 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7a2a3a4f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:46:37,608 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7fa27241{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:46:37,609 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:46:37,609 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@402062d6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:46:37,609 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b53e4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/hadoop.log.dir/,STOPPED} 2024-11-14T06:46:37,610 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
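The "Thread=154 (was 79)" summary and the "Potentially hanging thread" stacks reported just below come from the test's resource checker, which records the live threads before the test and afterwards prints a stack for each thread that is still running but was not present at the start. A comparable check can be sketched with plain JDK APIs, as below; ResourceCheckSketch and dumpNewThreads are hypothetical names, not the HBase ResourceChecker implementation.

    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    // Illustrative sketch: snapshot live thread ids before a test, then report any thread
    // that is still alive afterwards and was not present in the snapshot, with its stack.
    public class ResourceCheckSketch {
        static Set<Long> snapshotThreadIds() {
            Set<Long> ids = new HashSet<>();
            for (Thread t : Thread.getAllStackTraces().keySet()) {
                ids.add(t.getId());
            }
            return ids;
        }

        static void dumpNewThreads(Set<Long> before) {
            Map<Thread, StackTraceElement[]> now = Thread.getAllStackTraces();
            System.out.println("Thread=" + now.size() + " (was " + before.size() + ")");
            for (Map.Entry<Thread, StackTraceElement[]> entry : now.entrySet()) {
                if (!before.contains(entry.getKey().getId())) {
                    System.out.println("Potentially hanging thread: " + entry.getKey().getName());
                    for (StackTraceElement frame : entry.getValue()) {
                        System.out.println("    " + frame);
                    }
                }
            }
        }
    }

A snapshot taken at test start and a dumpNewThreads(before) call at teardown would yield a report of the same shape as the listing that follows.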
2024-11-14T06:46:37,610 WARN [BP-1678146812-172.17.0.2-1731566752608 heartbeating to localhost/127.0.0.1:33995 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T06:46:37,610 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T06:46:37,610 WARN [BP-1678146812-172.17.0.2-1731566752608 heartbeating to localhost/127.0.0.1:33995 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1678146812-172.17.0.2-1731566752608 (Datanode Uuid 92e56133-38bc-451b-a9b6-c6dd482e646f) service to localhost/127.0.0.1:33995 2024-11-14T06:46:37,610 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data5/current/BP-1678146812-172.17.0.2-1731566752608 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:46:37,610 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/cluster_3447f163-80a8-b277-121c-0ac6df1b5e50/data/data6/current/BP-1678146812-172.17.0.2-1731566752608 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:46:37,611 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T06:46:37,615 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7cf515b1{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T06:46:37,616 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@c6abea1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:46:37,616 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:46:37,616 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e7025d4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:46:37,616 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16369da1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/hadoop.log.dir/,STOPPED} 2024-11-14T06:46:37,625 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T06:46:37,659 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T06:46:37,666 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=154 (was 79) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33995 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: 
nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007f88a8bef5c0.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33995 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33995 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33995 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:33995 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33995 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33995 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:33995 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33995 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36135 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007f88a8bef5c0.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:36135 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33995 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33995 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=450 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=98 (was 133), ProcessCount=11 (was 11), AvailableMemoryMB=387 (was 1219) 2024-11-14T06:46:37,673 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=98, ProcessCount=11, AvailableMemoryMB=387 2024-11-14T06:46:37,673 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T06:46:37,673 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/hadoop.log.dir so I do NOT create it in target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816 2024-11-14T06:46:37,673 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/68f06828-ae2b-f982-0219-24a5c538376d/hadoop.tmp.dir so I do NOT create it in target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816 2024-11-14T06:46:37,673 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/cluster_4618df87-c57f-5a2a-f298-de46af091010, deleteOnExit=true 2024-11-14T06:46:37,673 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T06:46:37,674 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/test.cache.data in system properties and HBase conf 2024-11-14T06:46:37,674 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T06:46:37,674 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/hadoop.log.dir in system properties and HBase conf 2024-11-14T06:46:37,674 INFO [Time-limited test 
{}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T06:46:37,674 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T06:46:37,674 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T06:46:37,674 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-14T06:46:37,674 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T06:46:37,674 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T06:46:37,674 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T06:46:37,674 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T06:46:37,674 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T06:46:37,675 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T06:46:37,675 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T06:46:37,675 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T06:46:37,675 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T06:46:37,675 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/nfs.dump.dir in system properties and HBase conf 2024-11-14T06:46:37,675 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/java.io.tmpdir in system properties and HBase conf 2024-11-14T06:46:37,675 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T06:46:37,675 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T06:46:37,675 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T06:46:37,686 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T06:46:37,731 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:46:37,735 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:46:37,736 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:46:37,736 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:46:37,736 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T06:46:37,737 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:46:37,737 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@421a8f73{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:46:37,738 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13fdd007{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:46:37,831 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1ea36316{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/java.io.tmpdir/jetty-localhost-39803-hadoop-hdfs-3_4_1-tests_jar-_-any-16184491947707676360/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T06:46:37,832 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4d48810f{HTTP/1.1, (http/1.1)}{localhost:39803} 2024-11-14T06:46:37,832 INFO [Time-limited test {}] server.Server(415): Started @149428ms 2024-11-14T06:46:37,842 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T06:46:37,889 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:46:37,892 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:46:37,892 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:46:37,892 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:46:37,893 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T06:46:37,893 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@66c0323e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:46:37,893 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74ea1d44{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:46:37,986 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6e0e18a9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/java.io.tmpdir/jetty-localhost-46401-hadoop-hdfs-3_4_1-tests_jar-_-any-14430383401353037461/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:46:37,986 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a5d6d97{HTTP/1.1, (http/1.1)}{localhost:46401} 2024-11-14T06:46:37,986 INFO [Time-limited test {}] server.Server(415): Started @149583ms 2024-11-14T06:46:37,988 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T06:46:38,012 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:46:38,015 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:46:38,016 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:46:38,016 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:46:38,016 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T06:46:38,017 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28441b3a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:46:38,018 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4360f0f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:46:38,042 WARN [Thread-1198 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/cluster_4618df87-c57f-5a2a-f298-de46af091010/data/data1/current/BP-709259206-172.17.0.2-1731566797696/current, will proceed with Du for space computation calculation, 2024-11-14T06:46:38,042 WARN [Thread-1199 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/cluster_4618df87-c57f-5a2a-f298-de46af091010/data/data2/current/BP-709259206-172.17.0.2-1731566797696/current, will proceed with Du for space computation calculation, 2024-11-14T06:46:38,059 WARN [Thread-1177 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T06:46:38,061 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x604462094621dfe8 with lease ID 0x13aae018271b6808: Processing first storage report for DS-72bb6c81-338f-41ef-9f68-8fb00136b930 from datanode DatanodeRegistration(127.0.0.1:36407, datanodeUuid=659fc5c0-125d-4576-aad8-8ccd588b48bc, infoPort=39255, infoSecurePort=0, ipcPort=38189, storageInfo=lv=-57;cid=testClusterID;nsid=665760022;c=1731566797696) 2024-11-14T06:46:38,061 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x604462094621dfe8 with lease ID 0x13aae018271b6808: from storage DS-72bb6c81-338f-41ef-9f68-8fb00136b930 node DatanodeRegistration(127.0.0.1:36407, datanodeUuid=659fc5c0-125d-4576-aad8-8ccd588b48bc, infoPort=39255, infoSecurePort=0, ipcPort=38189, storageInfo=lv=-57;cid=testClusterID;nsid=665760022;c=1731566797696), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:46:38,061 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x604462094621dfe8 with lease ID 0x13aae018271b6808: Processing first storage report for DS-9bdf66f7-b45b-4afb-9957-c95cb710f622 from datanode DatanodeRegistration(127.0.0.1:36407, datanodeUuid=659fc5c0-125d-4576-aad8-8ccd588b48bc, infoPort=39255, infoSecurePort=0, ipcPort=38189, storageInfo=lv=-57;cid=testClusterID;nsid=665760022;c=1731566797696) 2024-11-14T06:46:38,061 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x604462094621dfe8 with lease ID 0x13aae018271b6808: from storage DS-9bdf66f7-b45b-4afb-9957-c95cb710f622 node DatanodeRegistration(127.0.0.1:36407, datanodeUuid=659fc5c0-125d-4576-aad8-8ccd588b48bc, infoPort=39255, infoSecurePort=0, ipcPort=38189, storageInfo=lv=-57;cid=testClusterID;nsid=665760022;c=1731566797696), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:46:38,116 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@9982f0a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/java.io.tmpdir/jetty-localhost-34967-hadoop-hdfs-3_4_1-tests_jar-_-any-16570264852830739360/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:46:38,116 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@70df7796{HTTP/1.1, (http/1.1)}{localhost:34967} 2024-11-14T06:46:38,116 INFO [Time-limited test {}] server.Server(415): Started @149713ms 2024-11-14T06:46:38,117 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-14T06:46:38,174 WARN [Thread-1224 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/cluster_4618df87-c57f-5a2a-f298-de46af091010/data/data3/current/BP-709259206-172.17.0.2-1731566797696/current, will proceed with Du for space computation calculation, 2024-11-14T06:46:38,174 WARN [Thread-1225 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/cluster_4618df87-c57f-5a2a-f298-de46af091010/data/data4/current/BP-709259206-172.17.0.2-1731566797696/current, will proceed with Du for space computation calculation, 2024-11-14T06:46:38,189 WARN [Thread-1213 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T06:46:38,191 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x333a28a838a89d02 with lease ID 0x13aae018271b6809: Processing first storage report for DS-6547f4ee-5cd4-4892-b9ea-4fea79a378a7 from datanode DatanodeRegistration(127.0.0.1:46667, datanodeUuid=15ed0fda-6d9b-43d9-b533-c5f98ecefeb6, infoPort=43761, infoSecurePort=0, ipcPort=34703, storageInfo=lv=-57;cid=testClusterID;nsid=665760022;c=1731566797696) 2024-11-14T06:46:38,192 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x333a28a838a89d02 with lease ID 0x13aae018271b6809: from storage DS-6547f4ee-5cd4-4892-b9ea-4fea79a378a7 node DatanodeRegistration(127.0.0.1:46667, datanodeUuid=15ed0fda-6d9b-43d9-b533-c5f98ecefeb6, infoPort=43761, infoSecurePort=0, ipcPort=34703, storageInfo=lv=-57;cid=testClusterID;nsid=665760022;c=1731566797696), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:46:38,192 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x333a28a838a89d02 with lease ID 0x13aae018271b6809: Processing first storage report for DS-6b458850-7ccb-4a84-984d-580d50e0a4b0 from datanode DatanodeRegistration(127.0.0.1:46667, datanodeUuid=15ed0fda-6d9b-43d9-b533-c5f98ecefeb6, infoPort=43761, infoSecurePort=0, ipcPort=34703, storageInfo=lv=-57;cid=testClusterID;nsid=665760022;c=1731566797696) 2024-11-14T06:46:38,192 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x333a28a838a89d02 with lease ID 0x13aae018271b6809: from storage DS-6b458850-7ccb-4a84-984d-580d50e0a4b0 node DatanodeRegistration(127.0.0.1:46667, datanodeUuid=15ed0fda-6d9b-43d9-b533-c5f98ecefeb6, infoPort=43761, infoSecurePort=0, ipcPort=34703, storageInfo=lv=-57;cid=testClusterID;nsid=665760022;c=1731566797696), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T06:46:38,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:38,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:38,243 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816 2024-11-14T06:46:38,248 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/cluster_4618df87-c57f-5a2a-f298-de46af091010/zookeeper_0, clientPort=62168, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/cluster_4618df87-c57f-5a2a-f298-de46af091010/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/cluster_4618df87-c57f-5a2a-f298-de46af091010/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T06:46:38,249 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62168 2024-11-14T06:46:38,249 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:46:38,251 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:46:38,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36407 is added to blk_1073741825_1001 (size=7) 2024-11-14T06:46:38,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741825_1001 (size=7) 2024-11-14T06:46:38,259 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5 with version=8 2024-11-14T06:46:38,259 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/hbase-staging 2024-11-14T06:46:38,261 INFO [Time-limited test {}] client.ConnectionUtils(128): master/20680646cf8a:0 server-side Connection retries=45 2024-11-14T06:46:38,261 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:46:38,261 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, 
maxQueueLength=30, handlerCount=3 2024-11-14T06:46:38,261 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T06:46:38,261 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:46:38,261 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T06:46:38,262 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T06:46:38,262 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T06:46:38,262 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39387 2024-11-14T06:46:38,264 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39387 connecting to ZooKeeper ensemble=127.0.0.1:62168 2024-11-14T06:46:38,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:393870x0, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T06:46:38,269 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39387-0x1003cfbe2150000 connected 2024-11-14T06:46:38,282 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:46:38,284 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:46:38,287 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:46:38,287 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5, hbase.cluster.distributed=false 2024-11-14T06:46:38,289 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T06:46:38,290 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39387 2024-11-14T06:46:38,290 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39387 2024-11-14T06:46:38,290 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39387 2024-11-14T06:46:38,291 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39387 2024-11-14T06:46:38,291 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): 
Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39387 2024-11-14T06:46:38,309 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/20680646cf8a:0 server-side Connection retries=45 2024-11-14T06:46:38,309 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:46:38,309 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T06:46:38,309 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T06:46:38,309 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:46:38,309 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T06:46:38,309 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T06:46:38,309 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T06:46:38,310 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44539 2024-11-14T06:46:38,311 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44539 connecting to ZooKeeper ensemble=127.0.0.1:62168 2024-11-14T06:46:38,312 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:46:38,313 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:46:38,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:445390x0, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T06:46:38,317 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:445390x0, quorum=127.0.0.1:62168, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:46:38,317 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44539-0x1003cfbe2150001 connected 2024-11-14T06:46:38,317 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T06:46:38,318 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T06:46:38,319 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44539-0x1003cfbe2150001, quorum=127.0.0.1:62168, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T06:46:38,319 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44539-0x1003cfbe2150001, quorum=127.0.0.1:62168, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T06:46:38,321 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44539 2024-11-14T06:46:38,321 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44539 2024-11-14T06:46:38,323 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44539 2024-11-14T06:46:38,324 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44539 2024-11-14T06:46:38,325 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44539 2024-11-14T06:46:38,335 DEBUG [M:0;20680646cf8a:39387 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;20680646cf8a:39387 2024-11-14T06:46:38,337 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/20680646cf8a,39387,1731566798261 2024-11-14T06:46:38,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T06:46:38,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1003cfbe2150001, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T06:46:38,339 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/20680646cf8a,39387,1731566798261 2024-11-14T06:46:38,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1003cfbe2150001, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T06:46:38,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:46:38,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1003cfbe2150001, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:46:38,340 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T06:46:38,340 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/20680646cf8a,39387,1731566798261 from backup master directory 2024-11-14T06:46:38,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1003cfbe2150001, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-14T06:46:38,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/20680646cf8a,39387,1731566798261 2024-11-14T06:46:38,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T06:46:38,341 WARN [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T06:46:38,341 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=20680646cf8a,39387,1731566798261 2024-11-14T06:46:38,346 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/hbase.id] with ID: e7d7c8e2-23ca-4455-80b2-da659a94d1da 2024-11-14T06:46:38,346 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/.tmp/hbase.id 2024-11-14T06:46:38,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36407 is added to blk_1073741826_1002 (size=42) 2024-11-14T06:46:38,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741826_1002 (size=42) 2024-11-14T06:46:38,353 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/.tmp/hbase.id]:[hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/hbase.id] 2024-11-14T06:46:38,364 INFO [master/20680646cf8a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:46:38,364 INFO [master/20680646cf8a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T06:46:38,365 INFO [master/20680646cf8a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-14T06:46:38,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1003cfbe2150001, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:46:38,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:46:38,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741827_1003 (size=196) 2024-11-14T06:46:38,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36407 is added to blk_1073741827_1003 (size=196) 2024-11-14T06:46:38,374 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T06:46:38,375 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T06:46:38,375 INFO [master/20680646cf8a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T06:46:38,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741828_1004 (size=1189) 2024-11-14T06:46:38,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36407 is added to blk_1073741828_1004 (size=1189) 2024-11-14T06:46:38,386 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/data/master/store 2024-11-14T06:46:38,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741829_1005 (size=34) 2024-11-14T06:46:38,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36407 is added to blk_1073741829_1005 (size=34) 2024-11-14T06:46:38,796 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:46:38,797 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T06:46:38,797 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:46:38,797 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:46:38,797 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T06:46:38,797 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:46:38,797 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
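The master:store descriptor printed above (families info, proc, rs and state, each with its own VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING and BLOCKSIZE) is an ordinary HBase table descriptor. As a hedged illustration, the sketch below builds an equivalent 'info' family through the public builder API, mirroring the attribute values from the log; the table name is hypothetical, and this is not how MasterRegion assembles its descriptor internally.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static TableDescriptor build() {
    // Mirrors the 'info' family attributes from the log:
    // VERSIONS=3, IN_MEMORY=true, BLOOMFILTER=ROWCOL,
    // DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8KB.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8 * 1024)
        .build();

    // Hypothetical table name for the sketch; the real region is the
    // master-local 'master:store' region, not a user table.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "store"))
        .setColumnFamily(info)
        .build();
  }
}
```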
2024-11-14T06:46:38,797 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731566798796Disabling compacts and flushes for region at 1731566798796Disabling writes for close at 1731566798797 (+1 ms)Writing region close event to WAL at 1731566798797Closed at 1731566798797 2024-11-14T06:46:38,799 WARN [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/data/master/store/.initializing 2024-11-14T06:46:38,799 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/WALs/20680646cf8a,39387,1731566798261 2024-11-14T06:46:38,806 INFO [master/20680646cf8a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=20680646cf8a%2C39387%2C1731566798261, suffix=, logDir=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/WALs/20680646cf8a,39387,1731566798261, archiveDir=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/oldWALs, maxLogs=10 2024-11-14T06:46:38,806 INFO [master/20680646cf8a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C39387%2C1731566798261.1731566798806 2024-11-14T06:46:38,812 INFO [master/20680646cf8a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/WALs/20680646cf8a,39387,1731566798261/20680646cf8a%2C39387%2C1731566798261.1731566798806 2024-11-14T06:46:38,815 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43761:43761),(127.0.0.1/127.0.0.1:39255:39255)] 2024-11-14T06:46:38,817 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T06:46:38,817 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:46:38,817 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:46:38,818 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:46:38,819 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:46:38,820 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T06:46:38,820 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:46:38,821 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:46:38,821 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:46:38,822 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T06:46:38,823 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:46:38,823 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:46:38,823 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:46:38,824 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T06:46:38,825 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:46:38,825 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:46:38,825 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:46:38,826 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T06:46:38,826 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:46:38,827 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:46:38,827 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:46:38,828 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:46:38,828 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:46:38,829 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:46:38,830 DEBUG [master/20680646cf8a:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:46:38,830 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T06:46:38,831 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:46:38,833 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T06:46:38,834 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=730270, jitterRate=-0.07141491770744324}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T06:46:38,834 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731566798818Initializing all the Stores at 1731566798819 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566798819Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566798819Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566798819Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566798819Cleaning up temporary data from old regions at 1731566798830 (+11 ms)Region opened successfully at 1731566798834 (+4 ms) 2024-11-14T06:46:38,835 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T06:46:38,838 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77bff7a6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=20680646cf8a/172.17.0.2:0 2024-11-14T06:46:38,839 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T06:46:38,839 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T06:46:38,839 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T06:46:38,839 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T06:46:38,840 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T06:46:38,840 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T06:46:38,840 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T06:46:38,842 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T06:46:38,843 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T06:46:38,844 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T06:46:38,844 INFO [master/20680646cf8a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T06:46:38,845 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T06:46:38,846 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T06:46:38,846 INFO [master/20680646cf8a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T06:46:38,847 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T06:46:38,847 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T06:46:38,848 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T06:46:38,849 DEBUG 
[master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T06:46:38,851 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T06:46:38,852 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T06:46:38,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1003cfbe2150001, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T06:46:38,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T06:46:38,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1003cfbe2150001, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:46:38,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:46:38,854 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=20680646cf8a,39387,1731566798261, sessionid=0x1003cfbe2150000, setting cluster-up flag (Was=false) 2024-11-14T06:46:38,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1003cfbe2150001, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:46:38,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:46:38,858 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T06:46:38,859 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=20680646cf8a,39387,1731566798261 2024-11-14T06:46:38,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:46:38,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1003cfbe2150001, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:46:38,864 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T06:46:38,865 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=20680646cf8a,39387,1731566798261 2024-11-14T06:46:38,866 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T06:46:38,867 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T06:46:38,868 INFO [master/20680646cf8a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T06:46:38,868 INFO [master/20680646cf8a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T06:46:38,868 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 20680646cf8a,39387,1731566798261 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T06:46:38,869 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:46:38,869 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:46:38,869 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:46:38,869 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:46:38,869 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/20680646cf8a:0, corePoolSize=10, maxPoolSize=10 2024-11-14T06:46:38,869 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:46:38,869 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/20680646cf8a:0, corePoolSize=2, maxPoolSize=2 2024-11-14T06:46:38,869 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/20680646cf8a:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T06:46:38,875 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731566828875 2024-11-14T06:46:38,875 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T06:46:38,875 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T06:46:38,875 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T06:46:38,875 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T06:46:38,875 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T06:46:38,875 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T06:46:38,875 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T06:46:38,875 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T06:46:38,876 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:46:38,876 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T06:46:38,877 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T06:46:38,877 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T06:46:38,878 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T06:46:38,878 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T06:46:38,879 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T06:46:38,879 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T06:46:38,879 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.large.0-1731566798879,5,FailOnTimeoutGroup] 2024-11-14T06:46:38,879 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.small.0-1731566798879,5,FailOnTimeoutGroup] 2024-11-14T06:46:38,879 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T06:46:38,880 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T06:46:38,880 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T06:46:38,880 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
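The ChoreService entries above enable LogsCleaner and HFileCleaner on a 600000 ms (10 minute) period. HBase's own ChoreService/ScheduledChore classes are internal, so as an analogy only, here is a sketch of the same fixed-period cleanup pattern with the JDK scheduler; the class name and the cleanOldWals body are hypothetical placeholders for the delegated cleaner plugins listed in the log.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class PeriodicCleanerSketch {
  public static void main(String[] args) {
    ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

    // Same cadence as the LogsCleaner chore in the log: period=600000 ms.
    scheduler.scheduleAtFixedRate(
        PeriodicCleanerSketch::cleanOldWals,
        0,                      // initial delay
        600_000,                // period
        TimeUnit.MILLISECONDS);
  }

  // Hypothetical placeholder for the actual cleanup work; HBase delegates this
  // to plugins such as TimeToLiveLogCleaner and ReplicationLogCleaner.
  private static void cleanOldWals() {
    System.out.println("scanning oldWALs for expired files...");
  }
}
```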
2024-11-14T06:46:38,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36407 is added to blk_1073741831_1007 (size=1321) 2024-11-14T06:46:38,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741831_1007 (size=1321) 2024-11-14T06:46:38,884 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T06:46:38,884 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5 2024-11-14T06:46:38,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741832_1008 (size=32) 2024-11-14T06:46:38,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36407 is added to blk_1073741832_1008 (size=32) 2024-11-14T06:46:38,892 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:46:38,893 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T06:46:38,895 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T06:46:38,895 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:46:38,895 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:46:38,895 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T06:46:38,896 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T06:46:38,896 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:46:38,897 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:46:38,897 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T06:46:38,898 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T06:46:38,898 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:46:38,898 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:46:38,898 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T06:46:38,899 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T06:46:38,899 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:46:38,900 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:46:38,900 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T06:46:38,900 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/hbase/meta/1588230740 2024-11-14T06:46:38,901 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/hbase/meta/1588230740 2024-11-14T06:46:38,902 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T06:46:38,902 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T06:46:38,902 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
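The FlushLargeStoresPolicy message just above falls back to memStoreFlushSize divided by the number of families (16.0 M for hbase:meta) because no per-family lower bound is set in the table descriptor. The sketch below sets the two knobs the log refers to: the standard region flush size in the configuration, and the lower-bound key, taken verbatim from the message, as a table-level attribute. Values, the table name, and the class name are illustrative assumptions.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    // Cluster-wide region memstore flush size: 128 MB here, matching the
    // flushSize=134217728 values printed earlier in this log.
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);

    // The log says the lower bound was "not set in table ... descriptor", so it
    // is expressed here as a table attribute; 16 MB and "demo:t" are illustrative.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo", "t"))
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(16L * 1024 * 1024))
        .build();
    System.out.println(td);
  }
}
```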
2024-11-14T06:46:38,903 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T06:46:38,905 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T06:46:38,905 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=862727, jitterRate=0.09701412916183472}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T06:46:38,906 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731566798892Initializing all the Stores at 1731566798893 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566798893Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566798893Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566798893Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566798893Cleaning up temporary data from old regions at 1731566798902 (+9 ms)Region opened successfully at 1731566798906 (+4 ms) 2024-11-14T06:46:38,906 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T06:46:38,906 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T06:46:38,906 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T06:46:38,906 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T06:46:38,906 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T06:46:38,906 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T06:46:38,906 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731566798906Disabling compacts and flushes for region at 1731566798906Disabling writes for close at 1731566798906Writing region close 
event to WAL at 1731566798906Closed at 1731566798906 2024-11-14T06:46:38,907 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T06:46:38,907 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T06:46:38,908 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T06:46:38,909 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T06:46:38,910 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T06:46:38,927 INFO [RS:0;20680646cf8a:44539 {}] regionserver.HRegionServer(746): ClusterId : e7d7c8e2-23ca-4455-80b2-da659a94d1da 2024-11-14T06:46:38,927 DEBUG [RS:0;20680646cf8a:44539 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T06:46:38,929 DEBUG [RS:0;20680646cf8a:44539 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T06:46:38,929 DEBUG [RS:0;20680646cf8a:44539 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T06:46:38,930 DEBUG [RS:0;20680646cf8a:44539 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T06:46:38,931 DEBUG [RS:0;20680646cf8a:44539 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4007f9b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=20680646cf8a/172.17.0.2:0 2024-11-14T06:46:38,940 DEBUG [RS:0;20680646cf8a:44539 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;20680646cf8a:44539 2024-11-14T06:46:38,941 INFO [RS:0;20680646cf8a:44539 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T06:46:38,941 INFO [RS:0;20680646cf8a:44539 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T06:46:38,941 DEBUG [RS:0;20680646cf8a:44539 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-14T06:46:38,942 INFO [RS:0;20680646cf8a:44539 {}] regionserver.HRegionServer(2659): reportForDuty to master=20680646cf8a,39387,1731566798261 with port=44539, startcode=1731566798308 2024-11-14T06:46:38,942 DEBUG [RS:0;20680646cf8a:44539 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T06:46:38,944 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40199, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T06:46:38,944 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39387 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 20680646cf8a,44539,1731566798308 2024-11-14T06:46:38,944 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39387 {}] master.ServerManager(517): Registering regionserver=20680646cf8a,44539,1731566798308 2024-11-14T06:46:38,946 DEBUG [RS:0;20680646cf8a:44539 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5 2024-11-14T06:46:38,946 DEBUG [RS:0;20680646cf8a:44539 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44227 2024-11-14T06:46:38,946 DEBUG [RS:0;20680646cf8a:44539 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T06:46:38,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T06:46:38,947 DEBUG [RS:0;20680646cf8a:44539 {}] zookeeper.ZKUtil(111): regionserver:44539-0x1003cfbe2150001, quorum=127.0.0.1:62168, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/20680646cf8a,44539,1731566798308 2024-11-14T06:46:38,948 WARN [RS:0;20680646cf8a:44539 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T06:46:38,948 INFO [RS:0;20680646cf8a:44539 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T06:46:38,948 DEBUG [RS:0;20680646cf8a:44539 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308 2024-11-14T06:46:38,948 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [20680646cf8a,44539,1731566798308] 2024-11-14T06:46:38,951 INFO [RS:0;20680646cf8a:44539 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T06:46:38,952 INFO [RS:0;20680646cf8a:44539 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T06:46:38,953 INFO [RS:0;20680646cf8a:44539 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T06:46:38,953 INFO [RS:0;20680646cf8a:44539 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
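The registration sequence above ends with an ephemeral znode under /hbase/rs and a watcher set on it, which is how RegionServerTracker learns about membership changes. A minimal stand-alone sketch of the same watch-the-children pattern with the plain ZooKeeper client follows; the quorum string and the /hbase/rs path are copied from the log lines, while the class name, timeouts, and the printed messages are illustrative.

```java
import java.util.List;

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RegionServerListWatchSketch {
  public static void main(String[] args) throws Exception {
    // Quorum string as printed in the log above (quorum=127.0.0.1:62168).
    ZooKeeper zk = new ZooKeeper("127.0.0.1:62168", 30_000, event -> { });
    try {
      Watcher onChange = (WatchedEvent event) -> {
        if (event.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
          System.out.println("membership under /hbase/rs changed: " + event.getPath());
        }
      };
      // getChildren with a watcher returns the current members and leaves a
      // one-shot watch behind, the same shape of interaction seen in the log.
      List<String> servers = zk.getChildren("/hbase/rs", onChange);
      System.out.println("current region servers: " + servers);
      Thread.sleep(60_000); // keep the session open long enough to observe a change
    } finally {
      zk.close();
    }
  }
}
```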
2024-11-14T06:46:38,953 INFO [RS:0;20680646cf8a:44539 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T06:46:38,954 INFO [RS:0;20680646cf8a:44539 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T06:46:38,954 INFO [RS:0;20680646cf8a:44539 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T06:46:38,954 DEBUG [RS:0;20680646cf8a:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:46:38,954 DEBUG [RS:0;20680646cf8a:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:46:38,954 DEBUG [RS:0;20680646cf8a:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:46:38,954 DEBUG [RS:0;20680646cf8a:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:46:38,954 DEBUG [RS:0;20680646cf8a:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:46:38,954 DEBUG [RS:0;20680646cf8a:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/20680646cf8a:0, corePoolSize=2, maxPoolSize=2 2024-11-14T06:46:38,954 DEBUG [RS:0;20680646cf8a:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:46:38,954 DEBUG [RS:0;20680646cf8a:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:46:38,954 DEBUG [RS:0;20680646cf8a:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:46:38,954 DEBUG [RS:0;20680646cf8a:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:46:38,954 DEBUG [RS:0;20680646cf8a:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:46:38,955 DEBUG [RS:0;20680646cf8a:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:46:38,955 DEBUG [RS:0;20680646cf8a:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/20680646cf8a:0, corePoolSize=3, maxPoolSize=3 2024-11-14T06:46:38,955 DEBUG [RS:0;20680646cf8a:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0, corePoolSize=3, maxPoolSize=3 2024-11-14T06:46:38,956 INFO [RS:0;20680646cf8a:44539 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
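Once the executor pools and chores above are wired up and the earlier reportForDuty registration has completed, the new region server becomes visible in the cluster's live-server view, which a client can also inspect. A hedged client-side sketch using the public Admin API follows; the quorum and client port are the values printed in this log, and the class name is an illustrative assumption.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class LiveServersSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Quorum and client port as they appear in the log (quorum=127.0.0.1:62168).
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "62168");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // After reportForDuty succeeds, 20680646cf8a,44539,1731566798308 shows up here.
      for (ServerName sn : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
        System.out.println("live region server: " + sn);
      }
    }
  }
}
```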
2024-11-14T06:46:38,956 INFO [RS:0;20680646cf8a:44539 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T06:46:38,956 INFO [RS:0;20680646cf8a:44539 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T06:46:38,956 INFO [RS:0;20680646cf8a:44539 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T06:46:38,956 INFO [RS:0;20680646cf8a:44539 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T06:46:38,956 INFO [RS:0;20680646cf8a:44539 {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,44539,1731566798308-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T06:46:38,970 INFO [RS:0;20680646cf8a:44539 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T06:46:38,970 INFO [RS:0;20680646cf8a:44539 {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,44539,1731566798308-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T06:46:38,970 INFO [RS:0;20680646cf8a:44539 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:46:38,970 INFO [RS:0;20680646cf8a:44539 {}] regionserver.Replication(171): 20680646cf8a,44539,1731566798308 started 2024-11-14T06:46:38,984 INFO [RS:0;20680646cf8a:44539 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:46:38,984 INFO [RS:0;20680646cf8a:44539 {}] regionserver.HRegionServer(1482): Serving as 20680646cf8a,44539,1731566798308, RpcServer on 20680646cf8a/172.17.0.2:44539, sessionid=0x1003cfbe2150001 2024-11-14T06:46:38,984 DEBUG [RS:0;20680646cf8a:44539 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T06:46:38,984 DEBUG [RS:0;20680646cf8a:44539 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 20680646cf8a,44539,1731566798308 2024-11-14T06:46:38,984 DEBUG [RS:0;20680646cf8a:44539 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '20680646cf8a,44539,1731566798308' 2024-11-14T06:46:38,984 DEBUG [RS:0;20680646cf8a:44539 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T06:46:38,985 DEBUG [RS:0;20680646cf8a:44539 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T06:46:38,985 DEBUG [RS:0;20680646cf8a:44539 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T06:46:38,985 DEBUG [RS:0;20680646cf8a:44539 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T06:46:38,985 DEBUG [RS:0;20680646cf8a:44539 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 20680646cf8a,44539,1731566798308 2024-11-14T06:46:38,985 DEBUG [RS:0;20680646cf8a:44539 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '20680646cf8a,44539,1731566798308' 2024-11-14T06:46:38,985 DEBUG [RS:0;20680646cf8a:44539 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T06:46:38,985 DEBUG 
[RS:0;20680646cf8a:44539 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T06:46:38,986 DEBUG [RS:0;20680646cf8a:44539 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T06:46:38,986 INFO [RS:0;20680646cf8a:44539 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T06:46:38,986 INFO [RS:0;20680646cf8a:44539 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T06:46:39,060 WARN [20680646cf8a:39387 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-14T06:46:39,088 INFO [RS:0;20680646cf8a:44539 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=20680646cf8a%2C44539%2C1731566798308, suffix=, logDir=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308, archiveDir=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/oldWALs, maxLogs=32 2024-11-14T06:46:39,088 INFO [RS:0;20680646cf8a:44539 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C44539%2C1731566798308.1731566799088 2024-11-14T06:46:39,096 INFO [RS:0;20680646cf8a:44539 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566799088 2024-11-14T06:46:39,097 DEBUG [RS:0;20680646cf8a:44539 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39255:39255),(127.0.0.1/127.0.0.1:43761:43761)] 2024-11-14T06:46:39,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:39,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:46:39,310 DEBUG [20680646cf8a:39387 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T06:46:39,311 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=20680646cf8a,44539,1731566798308 2024-11-14T06:46:39,312 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 20680646cf8a,44539,1731566798308, state=OPENING 2024-11-14T06:46:39,313 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T06:46:39,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:46:39,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1003cfbe2150001, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:46:39,314 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T06:46:39,314 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:46:39,314 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:46:39,314 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=20680646cf8a,44539,1731566798308}] 2024-11-14T06:46:39,468 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T06:46:39,471 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49749, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T06:46:39,479 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T06:46:39,479 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T06:46:39,482 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=20680646cf8a%2C44539%2C1731566798308.meta, suffix=.meta, logDir=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308, archiveDir=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/oldWALs, maxLogs=32 2024-11-14T06:46:39,483 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C44539%2C1731566798308.meta.1731566799482.meta 2024-11-14T06:46:39,487 INFO 
[RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.meta.1731566799482.meta 2024-11-14T06:46:39,488 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39255:39255),(127.0.0.1/127.0.0.1:43761:43761)] 2024-11-14T06:46:39,489 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T06:46:39,489 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T06:46:39,489 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T06:46:39,489 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-14T06:46:39,490 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T06:46:39,490 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:46:39,490 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T06:46:39,490 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T06:46:39,491 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T06:46:39,492 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T06:46:39,492 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:46:39,492 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:46:39,492 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T06:46:39,493 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T06:46:39,493 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:46:39,493 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:46:39,494 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T06:46:39,494 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T06:46:39,494 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:46:39,495 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:46:39,495 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T06:46:39,495 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T06:46:39,496 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:46:39,496 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:46:39,496 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T06:46:39,497 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/hbase/meta/1588230740 2024-11-14T06:46:39,498 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/hbase/meta/1588230740 2024-11-14T06:46:39,499 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T06:46:39,499 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T06:46:39,500 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-14T06:46:39,501 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T06:46:39,502 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=696800, jitterRate=-0.11397367715835571}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T06:46:39,502 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T06:46:39,502 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731566799490Writing region info on filesystem at 1731566799490Initializing all the Stores at 1731566799491 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566799491Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566799491Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566799491Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566799491Cleaning up temporary data from old regions at 1731566799499 (+8 ms)Running coprocessor post-open hooks at 1731566799502 (+3 ms)Region opened successfully at 1731566799502 2024-11-14T06:46:39,503 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731566799467 2024-11-14T06:46:39,506 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T06:46:39,506 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T06:46:39,506 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=20680646cf8a,44539,1731566798308 2024-11-14T06:46:39,507 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 20680646cf8a,44539,1731566798308, state=OPEN 2024-11-14T06:46:39,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T06:46:39,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1003cfbe2150001, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T06:46:39,510 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=20680646cf8a,44539,1731566798308 2024-11-14T06:46:39,510 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:46:39,510 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:46:39,513 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T06:46:39,513 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=20680646cf8a,44539,1731566798308 in 196 msec 2024-11-14T06:46:39,515 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T06:46:39,515 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 605 msec 2024-11-14T06:46:39,516 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T06:46:39,516 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T06:46:39,517 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T06:46:39,517 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=20680646cf8a,44539,1731566798308, seqNum=-1] 2024-11-14T06:46:39,517 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T06:46:39,518 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40393, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T06:46:39,524 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 656 msec 2024-11-14T06:46:39,524 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731566799524, completionTime=-1 2024-11-14T06:46:39,524 INFO 
[master/20680646cf8a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T06:46:39,524 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-14T06:46:39,526 INFO [master/20680646cf8a:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-14T06:46:39,526 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731566859526 2024-11-14T06:46:39,527 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731566919527 2024-11-14T06:46:39,527 INFO [master/20680646cf8a:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-14T06:46:39,527 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,39387,1731566798261-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T06:46:39,527 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,39387,1731566798261-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:46:39,527 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,39387,1731566798261-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:46:39,527 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-20680646cf8a:39387, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:46:39,527 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T06:46:39,528 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T06:46:39,529 DEBUG [master/20680646cf8a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T06:46:39,532 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.190sec 2024-11-14T06:46:39,532 INFO [master/20680646cf8a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T06:46:39,532 INFO [master/20680646cf8a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T06:46:39,532 INFO [master/20680646cf8a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T06:46:39,532 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-14T06:46:39,532 INFO [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T06:46:39,532 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,39387,1731566798261-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T06:46:39,532 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,39387,1731566798261-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T06:46:39,535 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T06:46:39,535 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T06:46:39,535 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,39387,1731566798261-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:46:39,630 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b9f6f01, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T06:46:39,630 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 20680646cf8a,39387,-1 for getting cluster id 2024-11-14T06:46:39,630 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T06:46:39,633 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e7d7c8e2-23ca-4455-80b2-da659a94d1da' 2024-11-14T06:46:39,633 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T06:46:39,634 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e7d7c8e2-23ca-4455-80b2-da659a94d1da" 2024-11-14T06:46:39,634 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11330505, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T06:46:39,634 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [20680646cf8a,39387,-1] 2024-11-14T06:46:39,634 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T06:46:39,635 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:46:39,636 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55914, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T06:46:39,638 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3db1f6e1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T06:46:39,638 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T06:46:39,639 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=20680646cf8a,44539,1731566798308, seqNum=-1] 2024-11-14T06:46:39,640 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T06:46:39,643 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46136, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T06:46:39,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=20680646cf8a,39387,1731566798261 2024-11-14T06:46:39,645 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:46:39,648 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T06:46:39,648 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-14T06:46:39,648 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-14T06:46:39,648 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-14T06:46:39,649 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 20680646cf8a,39387,1731566798261 2024-11-14T06:46:39,649 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2a96d2f3 2024-11-14T06:46:39,649 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-14T06:46:39,651 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55920, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-14T06:46:39,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39387 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-14T06:46:39,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39387 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-14T06:46:39,652 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39387 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T06:46:39,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39387 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-14T06:46:39,654 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-14T06:46:39,654 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:46:39,655 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39387 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-14T06:46:39,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39387 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T06:46:39,656 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-14T06:46:39,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741835_1011 (size=395) 2024-11-14T06:46:39,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36407 is added to blk_1073741835_1011 (size=395) 2024-11-14T06:46:39,663 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 40bca99bbade40641fdc5946bcdbab6a, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731566799651.40bca99bbade40641fdc5946bcdbab6a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5 2024-11-14T06:46:39,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741836_1012 (size=78) 2024-11-14T06:46:39,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36407 is added to blk_1073741836_1012 (size=78) 2024-11-14T06:46:39,670 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731566799651.40bca99bbade40641fdc5946bcdbab6a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:46:39,670 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 40bca99bbade40641fdc5946bcdbab6a, disabling compactions & flushes 2024-11-14T06:46:39,670 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731566799651.40bca99bbade40641fdc5946bcdbab6a. 2024-11-14T06:46:39,670 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731566799651.40bca99bbade40641fdc5946bcdbab6a. 2024-11-14T06:46:39,670 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731566799651.40bca99bbade40641fdc5946bcdbab6a. after waiting 0 ms 2024-11-14T06:46:39,670 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731566799651.40bca99bbade40641fdc5946bcdbab6a. 2024-11-14T06:46:39,670 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731566799651.40bca99bbade40641fdc5946bcdbab6a. 2024-11-14T06:46:39,670 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 40bca99bbade40641fdc5946bcdbab6a: Waiting for close lock at 1731566799670Disabling compacts and flushes for region at 1731566799670Disabling writes for close at 1731566799670Writing region close event to WAL at 1731566799670Closed at 1731566799670 2024-11-14T06:46:39,672 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-14T06:46:39,672 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731566799651.40bca99bbade40641fdc5946bcdbab6a.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731566799672"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731566799672"}]},"ts":"1731566799672"} 2024-11-14T06:46:39,674 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-14T06:46:39,675 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-14T06:46:39,676 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731566799675"}]},"ts":"1731566799675"} 2024-11-14T06:46:39,678 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-14T06:46:39,678 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=40bca99bbade40641fdc5946bcdbab6a, ASSIGN}] 2024-11-14T06:46:39,679 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=40bca99bbade40641fdc5946bcdbab6a, ASSIGN 2024-11-14T06:46:39,680 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=40bca99bbade40641fdc5946bcdbab6a, ASSIGN; state=OFFLINE, location=20680646cf8a,44539,1731566798308; forceNewPlan=false, retain=false 2024-11-14T06:46:39,832 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=40bca99bbade40641fdc5946bcdbab6a, regionState=OPENING, regionLocation=20680646cf8a,44539,1731566798308 2024-11-14T06:46:39,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=40bca99bbade40641fdc5946bcdbab6a, ASSIGN because future has completed 2024-11-14T06:46:39,840 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 40bca99bbade40641fdc5946bcdbab6a, server=20680646cf8a,44539,1731566798308}] 2024-11-14T06:46:40,006 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731566799651.40bca99bbade40641fdc5946bcdbab6a. 
2024-11-14T06:46:40,007 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 40bca99bbade40641fdc5946bcdbab6a, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731566799651.40bca99bbade40641fdc5946bcdbab6a.', STARTKEY => '', ENDKEY => ''} 2024-11-14T06:46:40,007 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 40bca99bbade40641fdc5946bcdbab6a 2024-11-14T06:46:40,008 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731566799651.40bca99bbade40641fdc5946bcdbab6a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:46:40,008 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 40bca99bbade40641fdc5946bcdbab6a 2024-11-14T06:46:40,008 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 40bca99bbade40641fdc5946bcdbab6a 2024-11-14T06:46:40,011 INFO [StoreOpener-40bca99bbade40641fdc5946bcdbab6a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 40bca99bbade40641fdc5946bcdbab6a 2024-11-14T06:46:40,014 INFO [StoreOpener-40bca99bbade40641fdc5946bcdbab6a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 40bca99bbade40641fdc5946bcdbab6a columnFamilyName info 2024-11-14T06:46:40,014 DEBUG [StoreOpener-40bca99bbade40641fdc5946bcdbab6a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:46:40,015 INFO [StoreOpener-40bca99bbade40641fdc5946bcdbab6a-1 {}] regionserver.HStore(327): Store=40bca99bbade40641fdc5946bcdbab6a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:46:40,015 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 40bca99bbade40641fdc5946bcdbab6a 2024-11-14T06:46:40,016 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/default/TestLogRolling-testLogRollOnPipelineRestart/40bca99bbade40641fdc5946bcdbab6a 2024-11-14T06:46:40,016 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/default/TestLogRolling-testLogRollOnPipelineRestart/40bca99bbade40641fdc5946bcdbab6a 2024-11-14T06:46:40,017 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 40bca99bbade40641fdc5946bcdbab6a 2024-11-14T06:46:40,017 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 40bca99bbade40641fdc5946bcdbab6a 2024-11-14T06:46:40,020 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 40bca99bbade40641fdc5946bcdbab6a 2024-11-14T06:46:40,024 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/default/TestLogRolling-testLogRollOnPipelineRestart/40bca99bbade40641fdc5946bcdbab6a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T06:46:40,024 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 40bca99bbade40641fdc5946bcdbab6a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=789946, jitterRate=0.004469186067581177}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T06:46:40,024 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 40bca99bbade40641fdc5946bcdbab6a 2024-11-14T06:46:40,025 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 40bca99bbade40641fdc5946bcdbab6a: Running coprocessor pre-open hook at 1731566800008Writing region info on filesystem at 1731566800008Initializing all the Stores at 1731566800010 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566800010Cleaning up temporary data from old regions at 1731566800017 (+7 ms)Running coprocessor post-open hooks at 1731566800024 (+7 ms)Region opened successfully at 1731566800025 (+1 ms) 2024-11-14T06:46:40,027 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731566799651.40bca99bbade40641fdc5946bcdbab6a., pid=6, masterSystemTime=1731566799997 2024-11-14T06:46:40,030 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1731566799651.40bca99bbade40641fdc5946bcdbab6a. 2024-11-14T06:46:40,030 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731566799651.40bca99bbade40641fdc5946bcdbab6a. 2024-11-14T06:46:40,031 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=40bca99bbade40641fdc5946bcdbab6a, regionState=OPEN, openSeqNum=2, regionLocation=20680646cf8a,44539,1731566798308 2024-11-14T06:46:40,034 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 40bca99bbade40641fdc5946bcdbab6a, server=20680646cf8a,44539,1731566798308 because future has completed 2024-11-14T06:46:40,039 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-14T06:46:40,040 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 40bca99bbade40641fdc5946bcdbab6a, server=20680646cf8a,44539,1731566798308 in 196 msec 2024-11-14T06:46:40,043 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-14T06:46:40,043 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=40bca99bbade40641fdc5946bcdbab6a, ASSIGN in 362 msec 2024-11-14T06:46:40,045 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-14T06:46:40,045 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731566800045"}]},"ts":"1731566800045"} 2024-11-14T06:46:40,048 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-14T06:46:40,049 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-14T06:46:40,052 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 397 msec 2024-11-14T06:46:40,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:40,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:41,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:41,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:42,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:42,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:43,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:43,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:44,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:44,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:44,997 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T06:46:45,020 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:46:45,020 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:46:45,020 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:46:45,021 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:46:45,021 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:46:45,021 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:46:45,025 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:46:45,025 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:46:45,025 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:46:45,028 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:46:45,040 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T06:46:45,040 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-14T06:46:45,041 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-14T06:46:45,041 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-14T06:46:45,041 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T06:46:45,041 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-14T06:46:45,041 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T06:46:45,042 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-14T06:46:45,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:45,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:46:46,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:46,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:47,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:46:47,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:48,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:48,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:46:49,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:49,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:49,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39387 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T06:46:49,706 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-14T06:46:49,706 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-14T06:46:49,712 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-14T06:46:49,712 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731566799651.40bca99bbade40641fdc5946bcdbab6a. 2024-11-14T06:46:49,718 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731566799651.40bca99bbade40641fdc5946bcdbab6a., hostname=20680646cf8a,44539,1731566798308, seqNum=2] 2024-11-14T06:46:50,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:50,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:51,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:51,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:51,721 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566799088 2024-11-14T06:46:51,723 WARN [ResponseProcessor for block BP-709259206-172.17.0.2-1731566797696:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-709259206-172.17.0.2-1731566797696:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:51,723 WARN [ResponseProcessor for block BP-709259206-172.17.0.2-1731566797696:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-709259206-172.17.0.2-1731566797696:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-709259206-172.17.0.2-1731566797696:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:46667,DS-6547f4ee-5cd4-4892-b9ea-4fea79a378a7,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
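Note: the repeated "InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" warnings above come from RecoverLeaseFSUtils probing DistributedFileSystem#isFileClosed through reflection after the test has already closed the underlying DFSClient, so the real IOException surfaces only as the cause of the reflective call. A minimal, hypothetical sketch of that kind of reflective probe (illustrative only; the class and method names below are assumptions, not the project's actual code):

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {

  private IsFileClosedProbe() {
  }

  /**
   * Reflectively asks the filesystem whether a file is closed, the way a
   * lease-recovery helper might when it cannot rely on the HDFS client API
   * at compile time. Returns false if the method is missing or the call fails.
   */
  public static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      // DistributedFileSystem#isFileClosed(Path) exists on HDFS clients;
      // other FileSystem implementations may not have it.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return false; // this filesystem does not support the probe
    } catch (InvocationTargetException e) {
      // The real failure (e.g. "Filesystem closed") is the cause, which is
      // why the log shows InvocationTargetException wrapping an IOException.
      Throwable cause = e.getCause();
      if (cause instanceof IOException) {
        return false; // a caller could log and retry here
      }
      throw new RuntimeException(cause);
    } catch (IllegalAccessException e) {
      throw new RuntimeException(e);
    }
  }
}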
2024-11-14T06:46:51,723 WARN [ResponseProcessor for block BP-709259206-172.17.0.2-1731566797696:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-709259206-172.17.0.2-1731566797696:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-709259206-172.17.0.2-1731566797696:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:46667,DS-6547f4ee-5cd4-4892-b9ea-4fea79a378a7,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:51,724 WARN [DataStreamer for file /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566799088 block BP-709259206-172.17.0.2-1731566797696:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-709259206-172.17.0.2-1731566797696:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36407,DS-72bb6c81-338f-41ef-9f68-8fb00136b930,DISK], DatanodeInfoWithStorage[127.0.0.1:46667,DS-6547f4ee-5cd4-4892-b9ea-4fea79a378a7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46667,DS-6547f4ee-5cd4-4892-b9ea-4fea79a378a7,DISK]) is bad. 2024-11-14T06:46:51,725 WARN [DataStreamer for file /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/WALs/20680646cf8a,39387,1731566798261/20680646cf8a%2C39387%2C1731566798261.1731566798806 block BP-709259206-172.17.0.2-1731566797696:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-709259206-172.17.0.2-1731566797696:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46667,DS-6547f4ee-5cd4-4892-b9ea-4fea79a378a7,DISK], DatanodeInfoWithStorage[127.0.0.1:36407,DS-72bb6c81-338f-41ef-9f68-8fb00136b930,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46667,DS-6547f4ee-5cd4-4892-b9ea-4fea79a378a7,DISK]) is bad. 2024-11-14T06:46:51,725 WARN [DataStreamer for file /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.meta.1731566799482.meta block BP-709259206-172.17.0.2-1731566797696:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-709259206-172.17.0.2-1731566797696:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36407,DS-72bb6c81-338f-41ef-9f68-8fb00136b930,DISK], DatanodeInfoWithStorage[127.0.0.1:46667,DS-6547f4ee-5cd4-4892-b9ea-4fea79a378a7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46667,DS-6547f4ee-5cd4-4892-b9ea-4fea79a378a7,DISK]) is bad. 2024-11-14T06:46:51,724 WARN [PacketResponder: BP-709259206-172.17.0.2-1731566797696:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46667] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:51,725 WARN [PacketResponder: BP-709259206-172.17.0.2-1731566797696:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46667] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:51,725 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2126938029_22 at /127.0.0.1:36292 [Receiving block BP-709259206-172.17.0.2-1731566797696:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36292 dst: /127.0.0.1:46667 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:51,726 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2126938029_22 at /127.0.0.1:59360 [Receiving block BP-709259206-172.17.0.2-1731566797696:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36407:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59360 dst: /127.0.0.1:36407 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T06:46:51,726 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_867469656_22 at /127.0.0.1:50372 [Receiving block BP-709259206-172.17.0.2-1731566797696:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36407:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50372 dst: /127.0.0.1:36407 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:51,727 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_867469656_22 at /127.0.0.1:50388 [Receiving block BP-709259206-172.17.0.2-1731566797696:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36407:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50388 dst: /127.0.0.1:36407 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T06:46:51,727 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_867469656_22 at /127.0.0.1:38896 [Receiving block BP-709259206-172.17.0.2-1731566797696:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38896 dst: /127.0.0.1:46667 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:51,727 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_867469656_22 at /127.0.0.1:38926 [Receiving block BP-709259206-172.17.0.2-1731566797696:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38926 dst: /127.0.0.1:46667 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:51,732 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@9982f0a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:46:51,732 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@70df7796{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:46:51,732 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:46:51,732 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4360f0f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:46:51,733 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28441b3a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/hadoop.log.dir/,STOPPED} 2024-11-14T06:46:51,734 WARN [BP-709259206-172.17.0.2-1731566797696 heartbeating to localhost/127.0.0.1:44227 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T06:46:51,734 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T06:46:51,734 WARN [BP-709259206-172.17.0.2-1731566797696 heartbeating to localhost/127.0.0.1:44227 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-709259206-172.17.0.2-1731566797696 (Datanode Uuid 15ed0fda-6d9b-43d9-b533-c5f98ecefeb6) service to localhost/127.0.0.1:44227 2024-11-14T06:46:51,734 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T06:46:51,734 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/cluster_4618df87-c57f-5a2a-f298-de46af091010/data/data3/current/BP-709259206-172.17.0.2-1731566797696 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:46:51,734 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/cluster_4618df87-c57f-5a2a-f298-de46af091010/data/data4/current/BP-709259206-172.17.0.2-1731566797696 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:46:51,735 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T06:46:51,741 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:46:51,744 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:46:51,744 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:46:51,744 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:46:51,744 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T06:46:51,745 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@431e378c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:46:51,745 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68e23717{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:46:51,836 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@21595673{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/java.io.tmpdir/jetty-localhost-43539-hadoop-hdfs-3_4_1-tests_jar-_-any-11522328010593346270/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:46:51,837 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4c50d76a{HTTP/1.1, 
(http/1.1)}{localhost:43539} 2024-11-14T06:46:51,837 INFO [Time-limited test {}] server.Server(415): Started @163433ms 2024-11-14T06:46:51,838 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T06:46:51,853 WARN [ResponseProcessor for block BP-709259206-172.17.0.2-1731566797696:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-709259206-172.17.0.2-1731566797696:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:51,853 WARN [ResponseProcessor for block BP-709259206-172.17.0.2-1731566797696:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-709259206-172.17.0.2-1731566797696:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:51,853 WARN [ResponseProcessor for block BP-709259206-172.17.0.2-1731566797696:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-709259206-172.17.0.2-1731566797696:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:51,854 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2126938029_22 at /127.0.0.1:32802 [Receiving block BP-709259206-172.17.0.2-1731566797696:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36407:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32802 dst: /127.0.0.1:36407 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:51,854 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_867469656_22 at /127.0.0.1:32792 [Receiving block BP-709259206-172.17.0.2-1731566797696:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36407:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32792 dst: /127.0.0.1:36407 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:51,854 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_867469656_22 at /127.0.0.1:32780 [Receiving block BP-709259206-172.17.0.2-1731566797696:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36407:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32780 dst: /127.0.0.1:36407 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T06:46:51,858 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6e0e18a9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:46:51,859 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a5d6d97{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:46:51,859 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:46:51,859 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74ea1d44{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:46:51,859 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@66c0323e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/hadoop.log.dir/,STOPPED} 2024-11-14T06:46:51,860 WARN [BP-709259206-172.17.0.2-1731566797696 heartbeating to localhost/127.0.0.1:44227 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T06:46:51,860 WARN [BP-709259206-172.17.0.2-1731566797696 heartbeating to localhost/127.0.0.1:44227 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-709259206-172.17.0.2-1731566797696 (Datanode Uuid 659fc5c0-125d-4576-aad8-8ccd588b48bc) service to localhost/127.0.0.1:44227 2024-11-14T06:46:51,860 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T06:46:51,860 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T06:46:51,861 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/cluster_4618df87-c57f-5a2a-f298-de46af091010/data/data1/current/BP-709259206-172.17.0.2-1731566797696 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:46:51,861 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/cluster_4618df87-c57f-5a2a-f298-de46af091010/data/data2/current/BP-709259206-172.17.0.2-1731566797696 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:46:51,861 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T06:46:51,869 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:46:51,873 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:46:51,874 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:46:51,874 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:46:51,874 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T06:46:51,874 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62f6e774{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:46:51,875 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@31267d1e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:46:51,912 WARN [Thread-1348 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T06:46:51,915 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x33b4fa6dceae0e28 with lease ID 0x13aae018271b680a: from storage DS-6547f4ee-5cd4-4892-b9ea-4fea79a378a7 node DatanodeRegistration(127.0.0.1:40597, datanodeUuid=15ed0fda-6d9b-43d9-b533-c5f98ecefeb6, infoPort=43365, infoSecurePort=0, ipcPort=40925, storageInfo=lv=-57;cid=testClusterID;nsid=665760022;c=1731566797696), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T06:46:51,915 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x33b4fa6dceae0e28 with lease ID 0x13aae018271b680a: from storage DS-6b458850-7ccb-4a84-984d-580d50e0a4b0 node DatanodeRegistration(127.0.0.1:40597, datanodeUuid=15ed0fda-6d9b-43d9-b533-c5f98ecefeb6, infoPort=43365, infoSecurePort=0, ipcPort=40925, storageInfo=lv=-57;cid=testClusterID;nsid=665760022;c=1731566797696), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:46:51,974 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@70770496{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/java.io.tmpdir/jetty-localhost-38403-hadoop-hdfs-3_4_1-tests_jar-_-any-4326777460081129919/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:46:51,974 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@542bdd12{HTTP/1.1, (http/1.1)}{localhost:38403} 2024-11-14T06:46:51,974 INFO [Time-limited test {}] server.Server(415): Started @163571ms 2024-11-14T06:46:51,975 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-14T06:46:52,036 WARN [Thread-1379 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T06:46:52,039 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb3b5ba8ebd4f3703 with lease ID 0x13aae018271b680b: from storage DS-72bb6c81-338f-41ef-9f68-8fb00136b930 node DatanodeRegistration(127.0.0.1:33225, datanodeUuid=659fc5c0-125d-4576-aad8-8ccd588b48bc, infoPort=41661, infoSecurePort=0, ipcPort=39763, storageInfo=lv=-57;cid=testClusterID;nsid=665760022;c=1731566797696), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:46:52,039 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb3b5ba8ebd4f3703 with lease ID 0x13aae018271b680b: from storage DS-9bdf66f7-b45b-4afb-9957-c95cb710f622 node DatanodeRegistration(127.0.0.1:33225, datanodeUuid=659fc5c0-125d-4576-aad8-8ccd588b48bc, infoPort=41661, infoSecurePort=0, ipcPort=39763, storageInfo=lv=-57;cid=testClusterID;nsid=665760022;c=1731566797696), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:46:52,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:46:52,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:52,992 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-14T06:46:52,998 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-14T06:46:53,001 ERROR [FSHLog-0-hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5-prefix:20680646cf8a,44539,1731566798308 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36407,DS-72bb6c81-338f-41ef-9f68-8fb00136b930,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T06:46:53,001 WARN [FSHLog-0-hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5-prefix:20680646cf8a,44539,1731566798308 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36407,DS-72bb6c81-338f-41ef-9f68-8fb00136b930,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:53,001 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 20680646cf8a%2C44539%2C1731566798308:(num 1731566799088) roll requested 2024-11-14T06:46:53,002 INFO [regionserver/20680646cf8a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C44539%2C1731566798308.1731566813001 2024-11-14T06:46:53,034 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566799088 newFile=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566813001 2024-11-14T06:46:53,034 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:53,034 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:53,034 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:53,034 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:53,035 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:53,035 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566799088 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566813001 2024-11-14T06:46:53,035 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36407,DS-72bb6c81-338f-41ef-9f68-8fb00136b930,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
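Note: because the old writer cannot be closed cleanly after the roll, the Close-WAL-Writer thread falls back to asking the namenode to recover the old WAL file's lease, which is what the "Recover lease on dfs file ... / Lease recovery is in progress ... / Failed to recover lease, attempt=0" lines that follow show. A hedged sketch of such a bounded recover-and-wait loop against DistributedFileSystem (the retry count and pause are made-up values, not HBase's):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoveryLoop {

  /**
   * Repeatedly asks the namenode to recover the lease on a file that a dead
   * or wedged writer still holds open. recoverLease() returns true once the
   * file is closed; until then the namenode reports that lease recovery is
   * still in progress, as in the log above.
   */
  public static boolean recoverLease(DistributedFileSystem dfs, Path file,
      int maxAttempts, long pauseMillis) throws Exception {
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      if (dfs.recoverLease(file)) {
        return true; // lease released and file closed
      }
      Thread.sleep(pauseMillis); // give block recovery time to finish
    }
    return false;
  }
}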
2024-11-14T06:46:53,035 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36407,DS-72bb6c81-338f-41ef-9f68-8fb00136b930,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:53,035 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566799088 2024-11-14T06:46:53,036 WARN [IPC Server handler 3 on default port 44227 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566799088 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-14T06:46:53,036 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566799088 after 1ms 2024-11-14T06:46:53,037 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41661:41661),(127.0.0.1/127.0.0.1:43365:43365)] 2024-11-14T06:46:53,037 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566799088 is not closed yet, will try archiving it next time 2024-11-14T06:46:53,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:53,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:46:54,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:54,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:55,044 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-14T06:46:55,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:46:55,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:55,916 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-14T06:46:56,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:56,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:57,038 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566799088 after 4003ms 2024-11-14T06:46:57,050 WARN [ResponseProcessor for block BP-709259206-172.17.0.2-1731566797696:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-709259206-172.17.0.2-1731566797696:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:57,051 WARN [DataStreamer for file /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566813001 block BP-709259206-172.17.0.2-1731566797696:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-709259206-172.17.0.2-1731566797696:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33225,DS-72bb6c81-338f-41ef-9f68-8fb00136b930,DISK], DatanodeInfoWithStorage[127.0.0.1:40597,DS-6547f4ee-5cd4-4892-b9ea-4fea79a378a7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33225,DS-72bb6c81-338f-41ef-9f68-8fb00136b930,DISK]) is bad. 
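Editor's note: the "Recovered lease, attempt=1 ... after 4003ms" entry above is the end of the recover-then-poll loop in RecoverLeaseFSUtils, which per the stack traces calls recoverLease and then checks isFileClosed until the NameNode finishes recovery (the repeated "Filesystem closed" warnings come from polling through a DFSClient that has already been shut down). A bare-bones version of that loop against a DistributedFileSystem looks roughly like the sketch below; the path argument and timeout are illustrative, not the values HBase uses.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoveryExample {
  /** Returns true once the file's lease is released and the file is closed. */
  static boolean recoverLease(DistributedFileSystem dfs, Path walFile, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    // Ask the NameNode to start lease recovery; it may complete immediately.
    boolean recovered = dfs.recoverLease(walFile);
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000);                      // back off between attempts
      if (dfs.isFileClosed(walFile)) {         // recovery finished in the background
        return true;
      }
      recovered = dfs.recoverLease(walFile);   // re-issue the recovery request
    }
    return recovered;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at an HDFS cluster, so the cast below is valid.
    try (FileSystem fs = FileSystem.get(conf)) {
      Path wal = new Path(args[0]);            // e.g. an old WAL file under /WALs
      System.out.println("recovered=" + recoverLease((DistributedFileSystem) fs, wal, 60_000));
    }
  }
}
```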
2024-11-14T06:46:57,052 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_867469656_22 at /127.0.0.1:48518 [Receiving block BP-709259206-172.17.0.2-1731566797696:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:40597:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48518 dst: /127.0.0.1:40597 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:57,052 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_867469656_22 at /127.0.0.1:59988 [Receiving block BP-709259206-172.17.0.2-1731566797696:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:33225:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59988 dst: /127.0.0.1:33225 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:57,055 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@70770496{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:46:57,056 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@542bdd12{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:46:57,056 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:46:57,057 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@31267d1e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:46:57,057 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62f6e774{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/hadoop.log.dir/,STOPPED} 2024-11-14T06:46:57,059 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
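Editor's note: the Jetty "Stopped" lines and the interrupted datanode threads above are the test bouncing its datanodes; the later "Data Nodes restarted" message confirms the restart. In a MiniDFSCluster-based test this is typically done along the lines of the sketch below; the builder options are illustrative, and the exact restart overload used by TestLogRolling may differ.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class RestartDataNodesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)   // two datanodes, matching the two-node pipelines in this log
        .build();
    try {
      cluster.waitActive();
      // Stop a datanode: its heartbeat and command-processor threads are interrupted,
      // producing messages like "Ending block pool service" and "interrupt and exit".
      MiniDFSCluster.DataNodeProperties dnProps = cluster.stopDataNode(0);
      // Bring it back on the same storage directories, then wait for the cluster to settle.
      cluster.restartDataNode(dnProps, true);
      cluster.waitActive();
    } finally {
      cluster.shutdown();
    }
  }
}
```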
2024-11-14T06:46:57,059 WARN [BP-709259206-172.17.0.2-1731566797696 heartbeating to localhost/127.0.0.1:44227 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T06:46:57,059 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T06:46:57,059 WARN [BP-709259206-172.17.0.2-1731566797696 heartbeating to localhost/127.0.0.1:44227 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-709259206-172.17.0.2-1731566797696 (Datanode Uuid 659fc5c0-125d-4576-aad8-8ccd588b48bc) service to localhost/127.0.0.1:44227 2024-11-14T06:46:57,060 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/cluster_4618df87-c57f-5a2a-f298-de46af091010/data/data1/current/BP-709259206-172.17.0.2-1731566797696 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:46:57,060 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/cluster_4618df87-c57f-5a2a-f298-de46af091010/data/data2/current/BP-709259206-172.17.0.2-1731566797696 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:46:57,060 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T06:46:57,069 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:46:57,072 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:46:57,073 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:46:57,073 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:46:57,073 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T06:46:57,075 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32c717fb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:46:57,075 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20432799{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:46:57,165 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5aca21ee{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/java.io.tmpdir/jetty-localhost-44127-hadoop-hdfs-3_4_1-tests_jar-_-any-12558907698074578109/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:46:57,166 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@570cb725{HTTP/1.1, (http/1.1)}{localhost:44127} 2024-11-14T06:46:57,166 INFO [Time-limited test {}] server.Server(415): Started @168763ms 2024-11-14T06:46:57,167 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T06:46:57,182 WARN [ResponseProcessor for block BP-709259206-172.17.0.2-1731566797696:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-709259206-172.17.0.2-1731566797696:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:57,182 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_867469656_22 at /127.0.0.1:48526 [Receiving block BP-709259206-172.17.0.2-1731566797696:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:40597:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48526 dst: /127.0.0.1:40597 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:46:57,188 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@21595673{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:46:57,188 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4c50d76a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:46:57,188 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:46:57,189 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68e23717{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:46:57,189 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@431e378c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/hadoop.log.dir/,STOPPED} 2024-11-14T06:46:57,190 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T06:46:57,190 WARN [BP-709259206-172.17.0.2-1731566797696 heartbeating to localhost/127.0.0.1:44227 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T06:46:57,190 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T06:46:57,190 WARN [BP-709259206-172.17.0.2-1731566797696 heartbeating to localhost/127.0.0.1:44227 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-709259206-172.17.0.2-1731566797696 (Datanode Uuid 15ed0fda-6d9b-43d9-b533-c5f98ecefeb6) service to localhost/127.0.0.1:44227 2024-11-14T06:46:57,190 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/cluster_4618df87-c57f-5a2a-f298-de46af091010/data/data3/current/BP-709259206-172.17.0.2-1731566797696 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:46:57,191 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/cluster_4618df87-c57f-5a2a-f298-de46af091010/data/data4/current/BP-709259206-172.17.0.2-1731566797696 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:46:57,191 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T06:46:57,200 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:46:57,203 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:46:57,203 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:46:57,203 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:46:57,204 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T06:46:57,204 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ebbad67{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:46:57,204 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1decdda3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:46:57,236 WARN [Thread-1422 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T06:46:57,239 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x873842b45e2783e1 with lease ID 0x13aae018271b680c: from storage DS-72bb6c81-338f-41ef-9f68-8fb00136b930 node DatanodeRegistration(127.0.0.1:42107, datanodeUuid=659fc5c0-125d-4576-aad8-8ccd588b48bc, infoPort=37501, infoSecurePort=0, ipcPort=39393, storageInfo=lv=-57;cid=testClusterID;nsid=665760022;c=1731566797696), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:46:57,239 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x873842b45e2783e1 with lease ID 0x13aae018271b680c: from storage DS-9bdf66f7-b45b-4afb-9957-c95cb710f622 node DatanodeRegistration(127.0.0.1:42107, datanodeUuid=659fc5c0-125d-4576-aad8-8ccd588b48bc, infoPort=37501, infoSecurePort=0, ipcPort=39393, storageInfo=lv=-57;cid=testClusterID;nsid=665760022;c=1731566797696), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:46:57,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:46:57,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:57,301 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@73f17350{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/java.io.tmpdir/jetty-localhost-37911-hadoop-hdfs-3_4_1-tests_jar-_-any-10653528142600332199/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:46:57,302 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7da73466{HTTP/1.1, (http/1.1)}{localhost:37911} 2024-11-14T06:46:57,302 INFO [Time-limited test {}] server.Server(415): Started @168899ms 2024-11-14T06:46:57,303 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T06:46:57,371 WARN [Thread-1453 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T06:46:57,374 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf1cd68239bbb59b9 with lease ID 0x13aae018271b680d: from storage DS-6547f4ee-5cd4-4892-b9ea-4fea79a378a7 node DatanodeRegistration(127.0.0.1:44231, datanodeUuid=15ed0fda-6d9b-43d9-b533-c5f98ecefeb6, infoPort=33985, infoSecurePort=0, ipcPort=41941, storageInfo=lv=-57;cid=testClusterID;nsid=665760022;c=1731566797696), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:46:57,374 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf1cd68239bbb59b9 with lease ID 0x13aae018271b680d: from storage DS-6b458850-7ccb-4a84-984d-580d50e0a4b0 node DatanodeRegistration(127.0.0.1:44231, datanodeUuid=15ed0fda-6d9b-43d9-b533-c5f98ecefeb6, infoPort=33985, infoSecurePort=0, ipcPort=41941, storageInfo=lv=-57;cid=testClusterID;nsid=665760022;c=1731566797696), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:46:58,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:46:58,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:58,320 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-14T06:46:58,323 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-14T06:46:58,325 ERROR [FSHLog-0-hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5-prefix:20680646cf8a,44539,1731566798308 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40597,DS-6547f4ee-5cd4-4892-b9ea-4fea79a378a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
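Editor's note: in between the pipeline failures the test keeps confirming that writes survived each roll ("Validated row row1003", "Validated row row1004" above). A validation of that sort is a plain read-back through the client API; this is a minimal sketch with placeholder table, family, and qualifier names, since those are not visible in this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ValidateRowSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         // Placeholder table name; the actual test table is not named in this log excerpt.
         Table table = conn.getTable(TableName.valueOf("testLogRolling"))) {
      Result result = table.get(new Get(Bytes.toBytes("row1004")));
      // Placeholder column family and qualifier.
      byte[] value = result.getValue(Bytes.toBytes("info"), Bytes.toBytes("q"));
      if (value == null) {
        throw new AssertionError("row1004 missing after WAL roll");
      }
      System.out.println("row1004 = " + Bytes.toString(value));
    }
  }
}
```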
2024-11-14T06:46:58,325 WARN [FSHLog-0-hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5-prefix:20680646cf8a,44539,1731566798308 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40597,DS-6547f4ee-5cd4-4892-b9ea-4fea79a378a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:58,325 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 20680646cf8a%2C44539%2C1731566798308:(num 1731566813001) roll requested 2024-11-14T06:46:58,326 INFO [regionserver/20680646cf8a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C44539%2C1731566798308.1731566818326 2024-11-14T06:46:58,334 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566813001 newFile=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566818326 2024-11-14T06:46:58,334 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:58,335 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:58,335 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:58,335 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:58,335 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:46:58,335 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566813001 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566818326 2024-11-14T06:46:58,335 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40597,DS-6547f4ee-5cd4-4892-b9ea-4fea79a378a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T06:46:58,335 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40597,DS-6547f4ee-5cd4-4892-b9ea-4fea79a378a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:46:58,336 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566813001 2024-11-14T06:46:58,336 WARN [IPC Server handler 0 on default port 44227 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566813001 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-14T06:46:58,336 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37501:37501),(127.0.0.1/127.0.0.1:33985:33985)] 2024-11-14T06:46:58,336 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566813001 is not closed yet, will try archiving it next time 2024-11-14T06:46:58,336 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566813001 after 0ms 2024-11-14T06:46:59,239 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-14T06:46:59,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:46:59,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:00,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:00,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:47:00,338 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C44539%2C1731566798308.1731566820338 2024-11-14T06:47:00,346 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566818326 newFile=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566820338 2024-11-14T06:47:00,346 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:00,346 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:00,346 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:00,346 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:00,347 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:00,347 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566818326 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566820338 2024-11-14T06:47:00,348 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33985:33985),(127.0.0.1/127.0.0.1:37501:37501)] 2024-11-14T06:47:00,348 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566813001 is not closed yet, will try archiving it next time 2024-11-14T06:47:00,348 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566818326 is not closed yet, will try archiving it next time 2024-11-14T06:47:00,349 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566799088 2024-11-14T06:47:00,349 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566799088 2024-11-14T06:47:00,349 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566799088 after 0ms 2024-11-14T06:47:00,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44231 is added to blk_1073741838_1019 (size=1264) 2024-11-14T06:47:00,349 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566799088 2024-11-14T06:47:00,350 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741838_1019 (size=1264) 2024-11-14T06:47:00,350 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566813001 is not closed yet, will try archiving it next time 2024-11-14T06:47:00,359 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731566800025/Put/vlen=218/seqid=0] 2024-11-14T06:47:00,360 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731566809719/Put/vlen=1045/seqid=0] 2024-11-14T06:47:00,360 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566799088 2024-11-14T06:47:00,360 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566813001 2024-11-14T06:47:00,360 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566813001 2024-11-14T06:47:00,360 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566813001 after 0ms 2024-11-14T06:47:00,360 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566813001 2024-11-14T06:47:00,364 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731566813001/Put/vlen=1045/seqid=0] 2024-11-14T06:47:00,364 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731566815047/Put/vlen=1045/seqid=0] 2024-11-14T06:47:00,364 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566813001 2024-11-14T06:47:00,364 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566818326 2024-11-14T06:47:00,364 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566818326 2024-11-14T06:47:00,364 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566818326 after 0ms 2024-11-14T06:47:00,364 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL 
/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566818326 2024-11-14T06:47:00,368 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731566818325/Put/vlen=1045/seqid=0] 2024-11-14T06:47:00,368 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566820338 2024-11-14T06:47:00,368 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566820338 2024-11-14T06:47:00,368 WARN [IPC Server handler 2 on default port 44227 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566820338 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-14T06:47:00,369 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566820338 after 1ms 2024-11-14T06:47:01,244 WARN [ResponseProcessor for block BP-709259206-172.17.0.2-1731566797696:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-709259206-172.17.0.2-1731566797696:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:47:01,244 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2126938029_22 at /127.0.0.1:37288 [Receiving block BP-709259206-172.17.0.2-1731566797696:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:44231:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37288 dst: /127.0.0.1:44231 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:44231 remote=/127.0.0.1:37288]. Total timeout mills is 60000, 59100 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:47:01,245 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2126938029_22 at /127.0.0.1:55492 [Receiving block BP-709259206-172.17.0.2-1731566797696:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:42107:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55492 dst: /127.0.0.1:42107 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:47:01,245 WARN [DataStreamer for file /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566820338 block BP-709259206-172.17.0.2-1731566797696:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-709259206-172.17.0.2-1731566797696:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44231,DS-6547f4ee-5cd4-4892-b9ea-4fea79a378a7,DISK], DatanodeInfoWithStorage[127.0.0.1:42107,DS-72bb6c81-338f-41ef-9f68-8fb00136b930,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44231,DS-6547f4ee-5cd4-4892-b9ea-4fea79a378a7,DISK]) is bad. 
2024-11-14T06:47:01,246 WARN [DataStreamer for file /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566820338 block BP-709259206-172.17.0.2-1731566797696:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-709259206-172.17.0.2-1731566797696:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T06:47:01,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44231 is added to blk_1073741839_1022 (size=85) 2024-11-14T06:47:01,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:01,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:02,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:02,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:02,339 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566813001 after 4002ms 2024-11-14T06:47:03,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:03,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:04,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:04,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:04,371 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566820338 after 4002ms 2024-11-14T06:47:04,371 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566820338 2024-11-14T06:47:04,379 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566820338 2024-11-14T06:47:04,379 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 40bca99bbade40641fdc5946bcdbab6a 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-14T06:47:04,379 ERROR [FSHLog-0-hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5-prefix:20680646cf8a,44539,1731566798308 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-709259206-172.17.0.2-1731566797696:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T06:47:04,380 WARN [FSHLog-0-hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5-prefix:20680646cf8a,44539,1731566798308 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-709259206-172.17.0.2-1731566797696:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T06:47:04,380 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 20680646cf8a%2C44539%2C1731566798308:(num 1731566820338) roll requested 2024-11-14T06:47:04,381 INFO [regionserver/20680646cf8a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C44539%2C1731566798308.1731566824381 2024-11-14T06:47:04,388 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566820338 newFile=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566824381 2024-11-14T06:47:04,388 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:04,389 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:04,389 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:04,389 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:04,389 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:04,389 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566820338 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566824381 2024-11-14T06:47:04,390 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-709259206-172.17.0.2-1731566797696:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:47:04,390 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-709259206-172.17.0.2-1731566797696:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:47:04,391 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566820338 2024-11-14T06:47:04,391 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566820338 after 0ms 2024-11-14T06:47:04,393 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.1731566820338 to hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/oldWALs/20680646cf8a%2C44539%2C1731566798308.1731566820338 2024-11-14T06:47:04,394 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37501:37501),(127.0.0.1/127.0.0.1:33985:33985)] 2024-11-14T06:47:04,409 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/default/TestLogRolling-testLogRollOnPipelineRestart/40bca99bbade40641fdc5946bcdbab6a/.tmp/info/eb8be2ed4dee4da78bb72cdf8f46a5f1 is 1080, key is row1002/info:/1731566809719/Put/seqid=0 2024-11-14T06:47:04,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44231 is added to blk_1073741841_1024 (size=9270) 2024-11-14T06:47:04,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741841_1024 (size=9270) 2024-11-14T06:47:04,414 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/default/TestLogRolling-testLogRollOnPipelineRestart/40bca99bbade40641fdc5946bcdbab6a/.tmp/info/eb8be2ed4dee4da78bb72cdf8f46a5f1 2024-11-14T06:47:04,421 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/default/TestLogRolling-testLogRollOnPipelineRestart/40bca99bbade40641fdc5946bcdbab6a/.tmp/info/eb8be2ed4dee4da78bb72cdf8f46a5f1 as hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/default/TestLogRolling-testLogRollOnPipelineRestart/40bca99bbade40641fdc5946bcdbab6a/info/eb8be2ed4dee4da78bb72cdf8f46a5f1 2024-11-14T06:47:04,426 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/default/TestLogRolling-testLogRollOnPipelineRestart/40bca99bbade40641fdc5946bcdbab6a/info/eb8be2ed4dee4da78bb72cdf8f46a5f1, entries=4, sequenceid=8, filesize=9.1 K 2024-11-14T06:47:04,428 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 40bca99bbade40641fdc5946bcdbab6a in 48ms, sequenceid=8, compaction requested=false 2024-11-14T06:47:04,428 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 40bca99bbade40641fdc5946bcdbab6a: 2024-11-14T06:47:04,428 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-14T06:47:04,428 ERROR [FSHLog-0-hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5-prefix:20680646cf8a,44539,1731566798308.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36407,DS-72bb6c81-338f-41ef-9f68-8fb00136b930,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:47:04,428 WARN [FSHLog-0-hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5-prefix:20680646cf8a,44539,1731566798308.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36407,DS-72bb6c81-338f-41ef-9f68-8fb00136b930,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T06:47:04,428 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 20680646cf8a%2C44539%2C1731566798308.meta:.meta(num 1731566799482) roll requested 2024-11-14T06:47:04,429 INFO [regionserver/20680646cf8a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C44539%2C1731566798308.meta.1731566824428.meta 2024-11-14T06:47:04,434 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:04,434 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:04,434 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:04,434 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:04,434 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:04,434 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.meta.1731566799482.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.meta.1731566824428.meta 2024-11-14T06:47:04,436 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36407,DS-72bb6c81-338f-41ef-9f68-8fb00136b930,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:47:04,436 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36407,DS-72bb6c81-338f-41ef-9f68-8fb00136b930,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
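Because closing the old writer fails, the Close-WAL-Writer thread falls back to recovering the HDFS lease on the abandoned WAL file, as the RecoverLeaseFSUtils entries that follow show. Below is a minimal sketch of that recover-then-poll pattern using the public `DistributedFileSystem.recoverLease` and `isFileClosed` calls; the real utility in the log does roughly this with its own backoff and reflection, and the timeout handling here is only illustrative.

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
    /**
     * Ask the NameNode to recover the lease on a file another writer left open,
     * then poll until the file is reported closed or the deadline passes.
     */
    static boolean recoverLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
            throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
            // true means the file is already closed and safe to read or archive.
            if (dfs.recoverLease(wal)) {
                return true;
            }
            // Otherwise recovery is still in progress (cf. the NameNode warning
            // "Lease recovery is in progress. RecoveryId = ..." in the log below).
            if (dfs.isFileClosed(wal)) {
                return true;
            }
            Thread.sleep(1000L);
        }
        return false;
    }
}
```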
2024-11-14T06:47:04,436 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.meta.1731566799482.meta 2024-11-14T06:47:04,437 WARN [IPC Server handler 4 on default port 44227 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.meta.1731566799482.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1013 2024-11-14T06:47:04,437 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.meta.1731566799482.meta after 1ms 2024-11-14T06:47:04,437 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33985:33985),(127.0.0.1/127.0.0.1:37501:37501)] 2024-11-14T06:47:04,437 DEBUG [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.meta.1731566799482.meta is not closed yet, will try archiving it next time 2024-11-14T06:47:04,451 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/hbase/meta/1588230740/.tmp/info/946255a4d1e1405193e06716aae2790f is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731566799651.40bca99bbade40641fdc5946bcdbab6a./info:regioninfo/1731566800031/Put/seqid=0 2024-11-14T06:47:04,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44231 is added to blk_1073741843_1027 (size=7125) 2024-11-14T06:47:04,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741843_1027 (size=7125) 2024-11-14T06:47:04,457 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/hbase/meta/1588230740/.tmp/info/946255a4d1e1405193e06716aae2790f 2024-11-14T06:47:04,476 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/hbase/meta/1588230740/.tmp/ns/f5bdba8bea414222b564ccf2436932bf is 43, key is default/ns:d/1731566799519/Put/seqid=0 2024-11-14T06:47:04,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741844_1028 (size=5153) 2024-11-14T06:47:04,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44231 is added to blk_1073741844_1028 (size=5153) 2024-11-14T06:47:04,482 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/hbase/meta/1588230740/.tmp/ns/f5bdba8bea414222b564ccf2436932bf 2024-11-14T06:47:04,499 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/hbase/meta/1588230740/.tmp/table/5f8524ccd7b54fb994312512ddc8396a is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731566800045/Put/seqid=0 2024-11-14T06:47:04,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44231 is added to blk_1073741845_1029 (size=5438) 2024-11-14T06:47:04,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741845_1029 (size=5438) 2024-11-14T06:47:04,504 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/hbase/meta/1588230740/.tmp/table/5f8524ccd7b54fb994312512ddc8396a 2024-11-14T06:47:04,510 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/hbase/meta/1588230740/.tmp/info/946255a4d1e1405193e06716aae2790f as hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/hbase/meta/1588230740/info/946255a4d1e1405193e06716aae2790f 2024-11-14T06:47:04,515 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/hbase/meta/1588230740/info/946255a4d1e1405193e06716aae2790f, entries=10, sequenceid=11, filesize=7.0 K 2024-11-14T06:47:04,516 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/hbase/meta/1588230740/.tmp/ns/f5bdba8bea414222b564ccf2436932bf as hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/hbase/meta/1588230740/ns/f5bdba8bea414222b564ccf2436932bf 2024-11-14T06:47:04,521 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/hbase/meta/1588230740/ns/f5bdba8bea414222b564ccf2436932bf, entries=2, sequenceid=11, filesize=5.0 K 2024-11-14T06:47:04,522 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/hbase/meta/1588230740/.tmp/table/5f8524ccd7b54fb994312512ddc8396a as hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/hbase/meta/1588230740/table/5f8524ccd7b54fb994312512ddc8396a 2024-11-14T06:47:04,528 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/hbase/meta/1588230740/table/5f8524ccd7b54fb994312512ddc8396a, entries=2, sequenceid=11, filesize=5.3 K 2024-11-14T06:47:04,529 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 101ms, sequenceid=11, compaction requested=false 2024-11-14T06:47:04,529 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-14T06:47:04,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T06:47:04,534 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T06:47:04,534 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T06:47:04,534 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:47:04,534 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:47:04,534 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-14T06:47:04,534 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T06:47:04,534 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=884655592, stopped=false 2024-11-14T06:47:04,534 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=20680646cf8a,39387,1731566798261 2024-11-14T06:47:04,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T06:47:04,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1003cfbe2150001, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T06:47:04,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:47:04,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1003cfbe2150001, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:47:04,536 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T06:47:04,536 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
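The NodeDeleted events above are how the master and region server learn that shutdown was requested: each keeps a watch on the /hbase/running znode and treats its deletion as the stop signal (the watch being re-set on the now-missing znode appears a few entries below). The following is a minimal stand-alone sketch of that watch pattern with the plain ZooKeeper client; the quorum address and znode path are taken from the log, and the callback body is illustrative rather than HBase's actual ZKWatcher code.

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatchSketch {
    public static void main(String[] args) throws Exception {
        final String runningZNode = "/hbase/running";
        Watcher watcher = new Watcher() {
            @Override
            public void process(WatchedEvent event) {
                if (event.getType() == Watcher.Event.EventType.NodeDeleted
                        && runningZNode.equals(event.getPath())) {
                    // In the log this is where the server begins its STOPPING sequence.
                    System.out.println("cluster shutdown requested");
                }
            }
        };
        // Quorum address as reported in the ZKWatcher entries above.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:62168", 30000, watcher);
        // exists() both checks the znode and arms the watch, even if the znode is absent,
        // matching the "Set watcher on znode that does not yet exist" entries below.
        zk.exists(runningZNode, true);
        Thread.sleep(60000L); // keep the process alive long enough to observe the event
        zk.close();
    }
}
```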
2024-11-14T06:47:04,536 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T06:47:04,536 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:47:04,536 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:47:04,536 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44539-0x1003cfbe2150001, quorum=127.0.0.1:62168, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:47:04,536 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '20680646cf8a,44539,1731566798308' ***** 2024-11-14T06:47:04,536 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T06:47:04,536 INFO [RS:0;20680646cf8a:44539 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T06:47:04,536 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T06:47:04,536 INFO [RS:0;20680646cf8a:44539 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T06:47:04,537 INFO [RS:0;20680646cf8a:44539 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T06:47:04,537 INFO [RS:0;20680646cf8a:44539 {}] regionserver.HRegionServer(3091): Received CLOSE for 40bca99bbade40641fdc5946bcdbab6a 2024-11-14T06:47:04,537 INFO [RS:0;20680646cf8a:44539 {}] regionserver.HRegionServer(959): stopping server 20680646cf8a,44539,1731566798308 2024-11-14T06:47:04,537 INFO [RS:0;20680646cf8a:44539 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T06:47:04,537 INFO [RS:0;20680646cf8a:44539 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;20680646cf8a:44539. 2024-11-14T06:47:04,537 DEBUG [RS:0;20680646cf8a:44539 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T06:47:04,537 DEBUG [RS:0;20680646cf8a:44539 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:47:04,537 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 40bca99bbade40641fdc5946bcdbab6a, disabling compactions & flushes 2024-11-14T06:47:04,537 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731566799651.40bca99bbade40641fdc5946bcdbab6a. 2024-11-14T06:47:04,537 INFO [RS:0;20680646cf8a:44539 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T06:47:04,537 INFO [RS:0;20680646cf8a:44539 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T06:47:04,537 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731566799651.40bca99bbade40641fdc5946bcdbab6a. 2024-11-14T06:47:04,537 INFO [RS:0;20680646cf8a:44539 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-14T06:47:04,537 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731566799651.40bca99bbade40641fdc5946bcdbab6a. after waiting 0 ms 2024-11-14T06:47:04,537 INFO [RS:0;20680646cf8a:44539 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T06:47:04,537 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731566799651.40bca99bbade40641fdc5946bcdbab6a. 2024-11-14T06:47:04,538 INFO [RS:0;20680646cf8a:44539 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-14T06:47:04,538 DEBUG [RS:0;20680646cf8a:44539 {}] regionserver.HRegionServer(1325): Online Regions={40bca99bbade40641fdc5946bcdbab6a=TestLogRolling-testLogRollOnPipelineRestart,,1731566799651.40bca99bbade40641fdc5946bcdbab6a., 1588230740=hbase:meta,,1.1588230740} 2024-11-14T06:47:04,538 DEBUG [RS:0;20680646cf8a:44539 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 40bca99bbade40641fdc5946bcdbab6a 2024-11-14T06:47:04,538 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T06:47:04,538 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T06:47:04,538 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T06:47:04,538 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T06:47:04,538 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T06:47:04,541 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/default/TestLogRolling-testLogRollOnPipelineRestart/40bca99bbade40641fdc5946bcdbab6a/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-14T06:47:04,541 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-14T06:47:04,542 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731566799651.40bca99bbade40641fdc5946bcdbab6a. 2024-11-14T06:47:04,542 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T06:47:04,542 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 40bca99bbade40641fdc5946bcdbab6a: Waiting for close lock at 1731566824537Running coprocessor pre-close hooks at 1731566824537Disabling compacts and flushes for region at 1731566824537Disabling writes for close at 1731566824537Writing region close event to WAL at 1731566824538 (+1 ms)Running coprocessor post-close hooks at 1731566824542 (+4 ms)Closed at 1731566824542 2024-11-14T06:47:04,542 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T06:47:04,542 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731566824538Running coprocessor pre-close hooks at 1731566824538Disabling compacts and flushes for region at 1731566824538Disabling writes for close at 1731566824538Writing region close event to WAL at 1731566824539 (+1 ms)Running coprocessor post-close hooks at 1731566824542 (+3 ms)Closed at 1731566824542 2024-11-14T06:47:04,542 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731566799651.40bca99bbade40641fdc5946bcdbab6a. 2024-11-14T06:47:04,542 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T06:47:04,738 INFO [RS:0;20680646cf8a:44539 {}] regionserver.HRegionServer(976): stopping server 20680646cf8a,44539,1731566798308; all regions closed. 
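The "Region close journal" entries above record each close step with an epoch-millisecond timestamp, and the "(+N ms)" annotations are the deltas from the preceding step. A small reading aid, using the timestamps copied from the journal for region 40bca99bbade40641fdc5946bcdbab6a; the step labels are shortened from the log wording.

```java
import java.time.Instant;

public class CloseJournalDeltaSketch {
    public static void main(String[] args) {
        // Epoch-millisecond timestamps from the region close journal above.
        long[] steps = { 1731566824537L, 1731566824537L, 1731566824537L, 1731566824537L,
                         1731566824538L, 1731566824542L, 1731566824542L };
        String[] names = { "Waiting for close lock", "Running coprocessor pre-close hooks",
                           "Disabling compacts and flushes", "Disabling writes for close",
                           "Writing region close event to WAL", "Running coprocessor post-close hooks",
                           "Closed" };
        for (int i = 0; i < steps.length; i++) {
            long delta = (i == 0) ? 0 : steps[i] - steps[i - 1];
            // Reproduces the "(+1 ms)" and "(+4 ms)" annotations seen in the journal.
            System.out.printf("%s at %s (+%d ms)%n", names[i], Instant.ofEpochMilli(steps[i]), delta);
        }
    }
}
```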
2024-11-14T06:47:04,739 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:04,739 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:04,739 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:04,739 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:04,739 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:04,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741842_1025 (size=825) 2024-11-14T06:47:04,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44231 is added to blk_1073741842_1025 (size=825) 2024-11-14T06:47:04,958 INFO [regionserver/20680646cf8a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T06:47:04,958 INFO [regionserver/20680646cf8a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-14T06:47:04,958 INFO [regionserver/20680646cf8a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-14T06:47:05,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:47:05,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:06,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:06,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:47:07,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:07,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:08,242 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T06:47:08,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:08,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:08,377 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-14T06:47:08,439 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.meta.1731566799482.meta after 4002ms 2024-11-14T06:47:08,440 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/WALs/20680646cf8a,44539,1731566798308/20680646cf8a%2C44539%2C1731566798308.meta.1731566799482.meta to hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/oldWALs/20680646cf8a%2C44539%2C1731566798308.meta.1731566799482.meta 2024-11-14T06:47:08,446 DEBUG [RS:0;20680646cf8a:44539 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/oldWALs 2024-11-14T06:47:08,446 INFO [RS:0;20680646cf8a:44539 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 20680646cf8a%2C44539%2C1731566798308.meta:.meta(num 1731566824428) 2024-11-14T06:47:08,447 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:08,447 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:08,447 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:08,448 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:08,448 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:08,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44231 is added to blk_1073741840_1023 (size=1162) 2024-11-14T06:47:08,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741840_1023 (size=1162) 2024-11-14T06:47:08,457 DEBUG [RS:0;20680646cf8a:44539 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/oldWALs 2024-11-14T06:47:08,457 INFO [RS:0;20680646cf8a:44539 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 20680646cf8a%2C44539%2C1731566798308:(num 1731566824381) 2024-11-14T06:47:08,457 DEBUG [RS:0;20680646cf8a:44539 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:47:08,457 INFO [RS:0;20680646cf8a:44539 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T06:47:08,457 INFO [RS:0;20680646cf8a:44539 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T06:47:08,457 INFO [RS:0;20680646cf8a:44539 {}] hbase.ChoreService(370): Chore service for: regionserver/20680646cf8a:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-14T06:47:08,457 INFO [RS:0;20680646cf8a:44539 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T06:47:08,457 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller 
exiting. 2024-11-14T06:47:08,458 INFO [RS:0;20680646cf8a:44539 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44539 2024-11-14T06:47:08,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1003cfbe2150001, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/20680646cf8a,44539,1731566798308 2024-11-14T06:47:08,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T06:47:08,459 INFO [RS:0;20680646cf8a:44539 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T06:47:08,460 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [20680646cf8a,44539,1731566798308] 2024-11-14T06:47:08,460 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/20680646cf8a,44539,1731566798308 already deleted, retry=false 2024-11-14T06:47:08,460 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 20680646cf8a,44539,1731566798308 expired; onlineServers=0 2024-11-14T06:47:08,460 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '20680646cf8a,39387,1731566798261' ***** 2024-11-14T06:47:08,461 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T06:47:08,461 INFO [M:0;20680646cf8a:39387 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T06:47:08,461 INFO [M:0;20680646cf8a:39387 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T06:47:08,461 DEBUG [M:0;20680646cf8a:39387 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T06:47:08,461 DEBUG [M:0;20680646cf8a:39387 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T06:47:08,461 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-14T06:47:08,461 DEBUG [master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.large.0-1731566798879 {}] cleaner.HFileCleaner(306): Exit Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.large.0-1731566798879,5,FailOnTimeoutGroup] 2024-11-14T06:47:08,461 DEBUG [master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.small.0-1731566798879 {}] cleaner.HFileCleaner(306): Exit Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.small.0-1731566798879,5,FailOnTimeoutGroup] 2024-11-14T06:47:08,461 INFO [M:0;20680646cf8a:39387 {}] hbase.ChoreService(370): Chore service for: master/20680646cf8a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T06:47:08,461 INFO [M:0;20680646cf8a:39387 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T06:47:08,461 DEBUG [M:0;20680646cf8a:39387 {}] master.HMaster(1795): Stopping service threads 2024-11-14T06:47:08,461 INFO [M:0;20680646cf8a:39387 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T06:47:08,461 INFO [M:0;20680646cf8a:39387 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T06:47:08,462 INFO [M:0;20680646cf8a:39387 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T06:47:08,462 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T06:47:08,462 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T06:47:08,462 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:47:08,462 DEBUG [M:0;20680646cf8a:39387 {}] zookeeper.ZKUtil(347): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T06:47:08,462 WARN [M:0;20680646cf8a:39387 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T06:47:08,463 INFO [M:0;20680646cf8a:39387 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/.lastflushedseqids 2024-11-14T06:47:08,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741846_1030 (size=130) 2024-11-14T06:47:08,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44231 is added to blk_1073741846_1030 (size=130) 2024-11-14T06:47:08,468 INFO [M:0;20680646cf8a:39387 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T06:47:08,468 INFO [M:0;20680646cf8a:39387 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T06:47:08,469 DEBUG [M:0;20680646cf8a:39387 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T06:47:08,469 INFO [M:0;20680646cf8a:39387 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:47:08,469 DEBUG [M:0;20680646cf8a:39387 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:47:08,469 DEBUG [M:0;20680646cf8a:39387 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T06:47:08,469 DEBUG [M:0;20680646cf8a:39387 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:47:08,469 INFO [M:0;20680646cf8a:39387 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.16 KB heapSize=29.13 KB 2024-11-14T06:47:08,469 ERROR [FSHLog-0-hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData-prefix:20680646cf8a,39387,1731566798261 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36407,DS-72bb6c81-338f-41ef-9f68-8fb00136b930,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:47:08,469 WARN [FSHLog-0-hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData-prefix:20680646cf8a,39387,1731566798261 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36407,DS-72bb6c81-338f-41ef-9f68-8fb00136b930,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T06:47:08,469 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 20680646cf8a%2C39387%2C1731566798261:(num 1731566798806) roll requested 2024-11-14T06:47:08,470 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C39387%2C1731566798261.1731566828469 2024-11-14T06:47:08,474 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:08,474 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:08,474 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:08,474 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:08,474 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:08,474 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/WALs/20680646cf8a,39387,1731566798261/20680646cf8a%2C39387%2C1731566798261.1731566798806 with entries=53, filesize=26.61 KB; new WAL /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/WALs/20680646cf8a,39387,1731566798261/20680646cf8a%2C39387%2C1731566798261.1731566828469 2024-11-14T06:47:08,474 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36407,DS-72bb6c81-338f-41ef-9f68-8fb00136b930,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T06:47:08,475 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36407,DS-72bb6c81-338f-41ef-9f68-8fb00136b930,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T06:47:08,475 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/WALs/20680646cf8a,39387,1731566798261/20680646cf8a%2C39387%2C1731566798261.1731566798806 2024-11-14T06:47:08,475 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33985:33985),(127.0.0.1/127.0.0.1:37501:37501)] 2024-11-14T06:47:08,475 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/WALs/20680646cf8a,39387,1731566798261/20680646cf8a%2C39387%2C1731566798261.1731566798806 is not closed yet, will try archiving it next time 2024-11-14T06:47:08,475 WARN [IPC Server handler 2 on default port 44227 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/WALs/20680646cf8a,39387,1731566798261/20680646cf8a%2C39387%2C1731566798261.1731566798806 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-14T06:47:08,475 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/WALs/20680646cf8a,39387,1731566798261/20680646cf8a%2C39387%2C1731566798261.1731566798806 after 0ms 2024-11-14T06:47:08,489 DEBUG [M:0;20680646cf8a:39387 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e1cb025c81ef412399df14e559864d8c is 82, key is hbase:meta,,1/info:regioninfo/1731566799506/Put/seqid=0 2024-11-14T06:47:08,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44231 is added to blk_1073741848_1033 (size=5672) 2024-11-14T06:47:08,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741848_1033 (size=5672) 2024-11-14T06:47:08,494 INFO [M:0;20680646cf8a:39387 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e1cb025c81ef412399df14e559864d8c 2024-11-14T06:47:08,513 DEBUG [M:0;20680646cf8a:39387 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b7029d0f63924447aefe6abf79362abf is 777, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731566800051/Put/seqid=0 2024-11-14T06:47:08,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44231 is added to blk_1073741849_1034 (size=6117) 2024-11-14T06:47:08,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741849_1034 (size=6117) 2024-11-14T06:47:08,518 INFO [M:0;20680646cf8a:39387 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.56 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b7029d0f63924447aefe6abf79362abf 2024-11-14T06:47:08,541 DEBUG [M:0;20680646cf8a:39387 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/314aaede8a4f4b69914a9b4b1bd88a26 is 69, key is 20680646cf8a,44539,1731566798308/rs:state/1731566798944/Put/seqid=0 2024-11-14T06:47:08,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741850_1035 (size=5156) 2024-11-14T06:47:08,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44231 is added to blk_1073741850_1035 (size=5156) 2024-11-14T06:47:08,546 INFO [M:0;20680646cf8a:39387 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/314aaede8a4f4b69914a9b4b1bd88a26 2024-11-14T06:47:08,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1003cfbe2150001, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:47:08,560 INFO [RS:0;20680646cf8a:44539 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T06:47:08,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1003cfbe2150001, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:47:08,560 INFO [RS:0;20680646cf8a:44539 {}] regionserver.HRegionServer(1031): Exiting; stopping=20680646cf8a,44539,1731566798308; zookeeper connection closed. 
2024-11-14T06:47:08,560 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1274cfae {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1274cfae 2024-11-14T06:47:08,560 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T06:47:08,564 DEBUG [M:0;20680646cf8a:39387 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a3caa92dfea043e0b76791d0551963ce is 52, key is load_balancer_on/state:d/1731566799647/Put/seqid=0 2024-11-14T06:47:08,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741851_1036 (size=5056) 2024-11-14T06:47:08,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44231 is added to blk_1073741851_1036 (size=5056) 2024-11-14T06:47:08,571 INFO [M:0;20680646cf8a:39387 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a3caa92dfea043e0b76791d0551963ce 2024-11-14T06:47:08,576 DEBUG [M:0;20680646cf8a:39387 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e1cb025c81ef412399df14e559864d8c as hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e1cb025c81ef412399df14e559864d8c 2024-11-14T06:47:08,581 INFO [M:0;20680646cf8a:39387 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e1cb025c81ef412399df14e559864d8c, entries=8, sequenceid=56, filesize=5.5 K 2024-11-14T06:47:08,582 DEBUG [M:0;20680646cf8a:39387 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b7029d0f63924447aefe6abf79362abf as hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b7029d0f63924447aefe6abf79362abf 2024-11-14T06:47:08,587 INFO [M:0;20680646cf8a:39387 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b7029d0f63924447aefe6abf79362abf, entries=6, sequenceid=56, filesize=6.0 K 2024-11-14T06:47:08,587 DEBUG [M:0;20680646cf8a:39387 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/314aaede8a4f4b69914a9b4b1bd88a26 as hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/314aaede8a4f4b69914a9b4b1bd88a26 
2024-11-14T06:47:08,592 INFO [M:0;20680646cf8a:39387 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/314aaede8a4f4b69914a9b4b1bd88a26, entries=1, sequenceid=56, filesize=5.0 K 2024-11-14T06:47:08,593 DEBUG [M:0;20680646cf8a:39387 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a3caa92dfea043e0b76791d0551963ce as hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a3caa92dfea043e0b76791d0551963ce 2024-11-14T06:47:08,597 INFO [M:0;20680646cf8a:39387 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a3caa92dfea043e0b76791d0551963ce, entries=1, sequenceid=56, filesize=4.9 K 2024-11-14T06:47:08,598 INFO [M:0;20680646cf8a:39387 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=56, compaction requested=false 2024-11-14T06:47:08,599 INFO [M:0;20680646cf8a:39387 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:47:08,600 DEBUG [M:0;20680646cf8a:39387 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731566828468Disabling compacts and flushes for region at 1731566828468Disabling writes for close at 1731566828469 (+1 ms)Obtaining lock to block concurrent updates at 1731566828469Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731566828469Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23714, getHeapSize=29768, getOffHeapSize=0, getCellsCount=67 at 1731566828469Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731566828475 (+6 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731566828475Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731566828489 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731566828489Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731566828499 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731566828513 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731566828513Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731566828523 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731566828541 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731566828541Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731566828551 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731566828563 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731566828563Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4e061d81: reopening flushed file at 1731566828575 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67f16921: reopening flushed file at 1731566828581 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@760e56f1: reopening flushed file at 1731566828587 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b13e896: reopening flushed file at 1731566828592 (+5 ms)Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=56, compaction requested=false at 1731566828598 (+6 ms)Writing region close event to WAL at 1731566828599 (+1 ms)Closed at 1731566828599 2024-11-14T06:47:08,600 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:08,600 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:08,600 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:08,600 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:08,600 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:08,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741847_1031 (size=757) 2024-11-14T06:47:08,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44231 is added to blk_1073741847_1031 (size=757) 2024-11-14T06:47:09,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:09,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:09,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:09,543 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:09,565 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:09,566 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:09,566 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:09,566 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:09,567 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:09,567 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:09,571 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:09,571 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:09,571 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:09,573 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:09,578 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:09,578 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:10,081 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T06:47:10,082 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:10,083 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:10,083 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:10,084 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:10,111 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:10,111 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:10,111 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:10,112 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:10,112 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:10,112 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:10,117 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:10,118 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:10,118 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:10,120 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:10,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:10,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:11,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:11,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:11,378 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
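The HBase-Metrics2-1 warnings at the start of this stretch (Cannot invoke "java.util.Map.values()" because "this.executors" is null) are a shutdown race: metric collection keeps sampling FsDatasetImpl while the executor map it walks has already been cleared (or was never populated) on that DataNode, so every sample throws and is logged as a WARN. A shutdown-safe getter simply snapshots the field before iterating; the sketch below is illustrative only and is not the FsDatasetImpl code, with every name other than the JDK types invented for the example.

import java.util.Map;
import java.util.concurrent.ThreadPoolExecutor;

class AsyncServiceMetricsSketch {
  // Illustrative stand-in for the executor map consulted during metric collection.
  private volatile Map<String, ThreadPoolExecutor> executors;

  long queuedOpCount() {
    Map<String, ThreadPoolExecutor> snapshot = executors; // may be null mid-shutdown
    if (snapshot == null) {
      return 0L; // report zero instead of throwing from the metrics thread
    }
    long queued = 0;
    for (ThreadPoolExecutor e : snapshot.values()) {
      queued += e.getQueue().size();
    }
    return queued;
  }
}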
2024-11-14T06:47:12,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:12,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:12,476 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/WALs/20680646cf8a,39387,1731566798261/20680646cf8a%2C39387%2C1731566798261.1731566798806 after 4001ms 2024-11-14T06:47:12,477 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/WALs/20680646cf8a,39387,1731566798261/20680646cf8a%2C39387%2C1731566798261.1731566798806 to hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/oldWALs/20680646cf8a%2C39387%2C1731566798261.1731566798806 2024-11-14T06:47:12,482 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/MasterData/oldWALs/20680646cf8a%2C39387%2C1731566798261.1731566798806 to hdfs://localhost:44227/user/jenkins/test-data/d7e36408-a4d3-4675-1465-ded15bf72dc5/oldWALs/20680646cf8a%2C39387%2C1731566798261.1731566798806$masterlocalwal$ 2024-11-14T06:47:12,483 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T06:47:12,483 INFO [M:0;20680646cf8a:39387 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
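The repeated Failed invocation warnings from Close-WAL-Writer-0 above are lease recovery on the old WAL files colliding with shutdown: RecoverLeaseFSUtils probes DistributedFileSystem.isFileClosed through reflection while it waits for the WAL to be closed, and because the DFSClient behind the handle for the localhost:33995 cluster has already been closed, each probe unwraps to java.io.IOException: Filesystem closed. The later "Recovered lease, attempt=1 ... after 4001ms" line is the same routine succeeding against the still-open localhost:44227 filesystem. A minimal sketch of the reflective probe-and-retry pattern, with hypothetical class and method names (only the HDFS API calls are real, and this is not the HBase implementation), looks like this:

import java.lang.reflect.Method;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseProbeSketch {
  // Returns true once the file is reported closed, polling roughly once per second.
  static boolean waitUntilClosed(DistributedFileSystem dfs, Path wal, int maxAttempts)
      throws InterruptedException {
    Method isFileClosed;
    try {
      // Looked up reflectively so callers also load against HDFS versions that lack
      // the method; this mirrors the GeneratedMethodAccessor frames in the traces.
      isFileClosed = dfs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // a real caller would fall back to plain recoverLease polling
    }
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        if ((Boolean) isFileClosed.invoke(dfs, wal)) {
          return true;
        }
      } catch (Exception e) {
        // An InvocationTargetException wrapping "Filesystem closed" means the
        // DFSClient behind this handle is already gone, as in the WARNs above.
      }
      Thread.sleep(1000L);
    }
    return false;
  }
}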
2024-11-14T06:47:12,483 INFO [M:0;20680646cf8a:39387 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39387 2024-11-14T06:47:12,483 INFO [M:0;20680646cf8a:39387 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T06:47:12,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:47:12,586 INFO [M:0;20680646cf8a:39387 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T06:47:12,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39387-0x1003cfbe2150000, quorum=127.0.0.1:62168, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:47:12,592 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@73f17350{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:47:12,593 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7da73466{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:47:12,593 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:47:12,593 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1decdda3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:47:12,593 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ebbad67{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/hadoop.log.dir/,STOPPED} 2024-11-14T06:47:12,594 WARN [BP-709259206-172.17.0.2-1731566797696 heartbeating to localhost/127.0.0.1:44227 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T06:47:12,594 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T06:47:12,595 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T06:47:12,595 WARN [BP-709259206-172.17.0.2-1731566797696 heartbeating to localhost/127.0.0.1:44227 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-709259206-172.17.0.2-1731566797696 (Datanode Uuid 15ed0fda-6d9b-43d9-b533-c5f98ecefeb6) service to localhost/127.0.0.1:44227 2024-11-14T06:47:12,595 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/cluster_4618df87-c57f-5a2a-f298-de46af091010/data/data3/current/BP-709259206-172.17.0.2-1731566797696 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:47:12,596 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/cluster_4618df87-c57f-5a2a-f298-de46af091010/data/data4/current/BP-709259206-172.17.0.2-1731566797696 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:47:12,596 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T06:47:12,598 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5aca21ee{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:47:12,598 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@570cb725{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:47:12,598 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:47:12,599 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20432799{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:47:12,599 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32c717fb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/hadoop.log.dir/,STOPPED} 2024-11-14T06:47:12,600 WARN [BP-709259206-172.17.0.2-1731566797696 heartbeating to localhost/127.0.0.1:44227 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T06:47:12,600 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
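The datanode teardown above follows the usual interrupt-driven pattern: ending the block pool service interrupts the heartbeat, command-processor and refreshUsed threads, and each one treats the InterruptedException as its stop signal (hence the "Thread Interrupted waiting to refresh disk information: sleep interrupted" warnings). A compact sketch of that pattern, with invented names and an illustrative interval, is:

class RefreshThreadSketch extends Thread {
  private volatile long usedBytes;

  @Override
  public void run() {
    while (!Thread.currentThread().isInterrupted()) {
      usedBytes = sampleDiskUsage(); // stand-in for the real du/df style probe
      try {
        Thread.sleep(10_000L);       // refresh interval; the value is illustrative
      } catch (InterruptedException e) {
        // Logged as a WARN by the DataNode; restore the flag and stop the loop.
        Thread.currentThread().interrupt();
        break;
      }
    }
  }

  private long sampleDiskUsage() {
    return 0L; // stand-in; the real thread walks the block pool directory
  }
}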
2024-11-14T06:47:12,600 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T06:47:12,600 WARN [BP-709259206-172.17.0.2-1731566797696 heartbeating to localhost/127.0.0.1:44227 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-709259206-172.17.0.2-1731566797696 (Datanode Uuid 659fc5c0-125d-4576-aad8-8ccd588b48bc) service to localhost/127.0.0.1:44227 2024-11-14T06:47:12,600 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/cluster_4618df87-c57f-5a2a-f298-de46af091010/data/data1/current/BP-709259206-172.17.0.2-1731566797696 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:47:12,601 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/cluster_4618df87-c57f-5a2a-f298-de46af091010/data/data2/current/BP-709259206-172.17.0.2-1731566797696 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:47:12,601 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T06:47:12,607 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1ea36316{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T06:47:12,607 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4d48810f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:47:12,607 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:47:12,607 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13fdd007{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:47:12,607 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@421a8f73{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/hadoop.log.dir/,STOPPED} 2024-11-14T06:47:12,614 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T06:47:12,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T06:47:12,637 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 154) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:44227 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44227 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44227 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44227 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44227 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to 
localhost/127.0.0.1:44227 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44227 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44227 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 450) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=80 (was 98), ProcessCount=11 (was 11), AvailableMemoryMB=260 (was 387) 2024-11-14T06:47:12,644 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=80, ProcessCount=11, AvailableMemoryMB=260 2024-11-14T06:47:12,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T06:47:12,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/hadoop.log.dir so I do NOT create it in target/test-data/45d68592-c015-287b-353c-c1c74f818e22 2024-11-14T06:47:12,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0204abc7-03d6-8b2a-123e-e4463a7b8816/hadoop.tmp.dir so I do NOT create it in target/test-data/45d68592-c015-287b-353c-c1c74f818e22 2024-11-14T06:47:12,645 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/cluster_96f36b60-77a4-0898-83c2-780ff865c39a, deleteOnExit=true 2024-11-14T06:47:12,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T06:47:12,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/test.cache.data in system properties and HBase conf 2024-11-14T06:47:12,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T06:47:12,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/hadoop.log.dir in system properties and HBase conf 2024-11-14T06:47:12,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T06:47:12,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T06:47:12,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T06:47:12,646 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-14T06:47:12,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T06:47:12,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T06:47:12,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T06:47:12,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T06:47:12,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T06:47:12,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T06:47:12,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T06:47:12,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T06:47:12,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T06:47:12,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/nfs.dump.dir in system properties and HBase conf 2024-11-14T06:47:12,647 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/java.io.tmpdir in system properties and HBase conf 2024-11-14T06:47:12,647 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T06:47:12,647 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T06:47:12,647 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T06:47:12,660 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T06:47:12,705 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:47:12,710 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:47:12,711 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:47:12,711 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:47:12,711 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T06:47:12,711 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:47:12,712 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ece2bc8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:47:12,712 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16208fe2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:47:12,805 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4e2e10dc{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/java.io.tmpdir/jetty-localhost-46669-hadoop-hdfs-3_4_1-tests_jar-_-any-59184548848531065/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T06:47:12,805 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3cb009a1{HTTP/1.1, (http/1.1)}{localhost:46669} 2024-11-14T06:47:12,805 INFO [Time-limited test {}] server.Server(415): Started @184402ms 2024-11-14T06:47:12,815 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T06:47:12,854 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:47:12,858 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:47:12,859 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:47:12,859 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:47:12,859 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T06:47:12,861 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@168edd0a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:47:12,861 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1fa04e54{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:47:12,953 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1d6dee42{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/java.io.tmpdir/jetty-localhost-35451-hadoop-hdfs-3_4_1-tests_jar-_-any-4755381630147700175/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:47:12,954 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3df88721{HTTP/1.1, (http/1.1)}{localhost:35451} 2024-11-14T06:47:12,954 INFO [Time-limited test {}] server.Server(415): Started @184551ms 2024-11-14T06:47:12,955 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T06:47:12,981 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-14T06:47:12,985 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-14T06:47:12,985 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-14T06:47:12,986 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-14T06:47:12,986 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-14T06:47:12,986 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ed77c81{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/hadoop.log.dir/,AVAILABLE}
2024-11-14T06:47:12,986 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@206f042f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-14T06:47:13,009 WARN [Thread-1647 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/cluster_96f36b60-77a4-0898-83c2-780ff865c39a/data/data1/current/BP-1653618985-172.17.0.2-1731566832670/current, will proceed with Du for space computation calculation,
2024-11-14T06:47:13,009 WARN [Thread-1648 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/cluster_96f36b60-77a4-0898-83c2-780ff865c39a/data/data2/current/BP-1653618985-172.17.0.2-1731566832670/current, will proceed with Du for space computation calculation,
2024-11-14T06:47:13,028 WARN [Thread-1626 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-14T06:47:13,030 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8cf75cffb97def2e with lease ID 0x909c747ff38c8fdc: Processing first storage report for DS-58efc96d-a1c9-458a-9ae8-aebcae50df0b from datanode DatanodeRegistration(127.0.0.1:40215, datanodeUuid=29635134-ca14-4c5e-b4f1-22ea7f2be238, infoPort=33507, infoSecurePort=0, ipcPort=35443, storageInfo=lv=-57;cid=testClusterID;nsid=915477601;c=1731566832670)
2024-11-14T06:47:13,030 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8cf75cffb97def2e with lease ID 0x909c747ff38c8fdc: from storage DS-58efc96d-a1c9-458a-9ae8-aebcae50df0b node DatanodeRegistration(127.0.0.1:40215, datanodeUuid=29635134-ca14-4c5e-b4f1-22ea7f2be238, infoPort=33507, infoSecurePort=0, ipcPort=35443, storageInfo=lv=-57;cid=testClusterID;nsid=915477601;c=1731566832670), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-14T06:47:13,030 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8cf75cffb97def2e with lease ID 0x909c747ff38c8fdc: Processing first storage report for DS-754cc37e-d9a4-48ac-933b-59d70d84655e from datanode DatanodeRegistration(127.0.0.1:40215, datanodeUuid=29635134-ca14-4c5e-b4f1-22ea7f2be238, infoPort=33507, infoSecurePort=0, ipcPort=35443, storageInfo=lv=-57;cid=testClusterID;nsid=915477601;c=1731566832670)
2024-11-14T06:47:13,030 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8cf75cffb97def2e with lease ID 0x909c747ff38c8fdc: from storage DS-754cc37e-d9a4-48ac-933b-59d70d84655e node DatanodeRegistration(127.0.0.1:40215, datanodeUuid=29635134-ca14-4c5e-b4f1-22ea7f2be238, infoPort=33507, infoSecurePort=0, ipcPort=35443, storageInfo=lv=-57;cid=testClusterID;nsid=915477601;c=1731566832670), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-14T06:47:13,085 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@73476a0a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/java.io.tmpdir/jetty-localhost-41289-hadoop-hdfs-3_4_1-tests_jar-_-any-400689223958069867/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-14T06:47:13,085 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@35023063{HTTP/1.1, (http/1.1)}{localhost:41289}
2024-11-14T06:47:13,085 INFO [Time-limited test {}] server.Server(415): Started @184682ms
2024-11-14T06:47:13,087 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-14T06:47:13,141 WARN [Thread-1673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/cluster_96f36b60-77a4-0898-83c2-780ff865c39a/data/data3/current/BP-1653618985-172.17.0.2-1731566832670/current, will proceed with Du for space computation calculation, 2024-11-14T06:47:13,141 WARN [Thread-1674 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/cluster_96f36b60-77a4-0898-83c2-780ff865c39a/data/data4/current/BP-1653618985-172.17.0.2-1731566832670/current, will proceed with Du for space computation calculation, 2024-11-14T06:47:13,156 WARN [Thread-1662 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T06:47:13,158 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1ecf5e88c3ad701e with lease ID 0x909c747ff38c8fdd: Processing first storage report for DS-4a5f387d-50dc-40ec-b9bd-83b00d0a7f8a from datanode DatanodeRegistration(127.0.0.1:34469, datanodeUuid=5187d187-9484-4741-9e8f-5c147f7cf165, infoPort=33883, infoSecurePort=0, ipcPort=43843, storageInfo=lv=-57;cid=testClusterID;nsid=915477601;c=1731566832670) 2024-11-14T06:47:13,158 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1ecf5e88c3ad701e with lease ID 0x909c747ff38c8fdd: from storage DS-4a5f387d-50dc-40ec-b9bd-83b00d0a7f8a node DatanodeRegistration(127.0.0.1:34469, datanodeUuid=5187d187-9484-4741-9e8f-5c147f7cf165, infoPort=33883, infoSecurePort=0, ipcPort=43843, storageInfo=lv=-57;cid=testClusterID;nsid=915477601;c=1731566832670), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:47:13,158 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1ecf5e88c3ad701e with lease ID 0x909c747ff38c8fdd: Processing first storage report for DS-26917804-898b-4e68-b65a-9592bc1db1ad from datanode DatanodeRegistration(127.0.0.1:34469, datanodeUuid=5187d187-9484-4741-9e8f-5c147f7cf165, infoPort=33883, infoSecurePort=0, ipcPort=43843, storageInfo=lv=-57;cid=testClusterID;nsid=915477601;c=1731566832670) 2024-11-14T06:47:13,158 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1ecf5e88c3ad701e with lease ID 0x909c747ff38c8fdd: from storage DS-26917804-898b-4e68-b65a-9592bc1db1ad node DatanodeRegistration(127.0.0.1:34469, datanodeUuid=5187d187-9484-4741-9e8f-5c147f7cf165, infoPort=33883, infoSecurePort=0, ipcPort=43843, storageInfo=lv=-57;cid=testClusterID;nsid=915477601;c=1731566832670), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:47:13,212 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22 2024-11-14T06:47:13,215 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/cluster_96f36b60-77a4-0898-83c2-780ff865c39a/zookeeper_0, clientPort=52340, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/cluster_96f36b60-77a4-0898-83c2-780ff865c39a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/cluster_96f36b60-77a4-0898-83c2-780ff865c39a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T06:47:13,215 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52340 2024-11-14T06:47:13,216 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:47:13,217 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:47:13,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741825_1001 (size=7) 2024-11-14T06:47:13,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741825_1001 (size=7) 2024-11-14T06:47:13,229 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa with version=8 2024-11-14T06:47:13,229 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/hbase-staging 2024-11-14T06:47:13,231 INFO [Time-limited test {}] client.ConnectionUtils(128): master/20680646cf8a:0 server-side Connection retries=45 2024-11-14T06:47:13,232 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:47:13,232 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T06:47:13,232 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T06:47:13,232 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:47:13,232 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T06:47:13,232 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T06:47:13,232 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T06:47:13,233 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34767 2024-11-14T06:47:13,235 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34767 connecting to ZooKeeper ensemble=127.0.0.1:52340 2024-11-14T06:47:13,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:347670x0, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T06:47:13,239 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34767-0x1003cfc6aac0000 connected 2024-11-14T06:47:13,249 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:47:13,250 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:47:13,252 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:47:13,252 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa, hbase.cluster.distributed=false 2024-11-14T06:47:13,253 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T06:47:13,253 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34767 2024-11-14T06:47:13,254 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34767 2024-11-14T06:47:13,254 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34767 2024-11-14T06:47:13,255 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34767 2024-11-14T06:47:13,257 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34767 2024-11-14T06:47:13,270 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/20680646cf8a:0 server-side Connection retries=45 2024-11-14T06:47:13,270 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:47:13,270 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T06:47:13,270 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T06:47:13,270 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:47:13,270 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T06:47:13,270 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T06:47:13,270 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T06:47:13,271 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45511 2024-11-14T06:47:13,272 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45511 connecting to ZooKeeper ensemble=127.0.0.1:52340 2024-11-14T06:47:13,272 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:47:13,274 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:47:13,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:455110x0, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T06:47:13,277 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45511-0x1003cfc6aac0001, quorum=127.0.0.1:52340, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:47:13,277 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45511-0x1003cfc6aac0001 connected 2024-11-14T06:47:13,278 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T06:47:13,280 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T06:47:13,281 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45511-0x1003cfc6aac0001, quorum=127.0.0.1:52340, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T06:47:13,281 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45511-0x1003cfc6aac0001, quorum=127.0.0.1:52340, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T06:47:13,282 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45511 2024-11-14T06:47:13,282 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45511 2024-11-14T06:47:13,282 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45511 2024-11-14T06:47:13,283 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45511 2024-11-14T06:47:13,283 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45511 
2024-11-14T06:47:13,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-14T06:47:13,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-14T06:47:13,293 DEBUG [M:0;20680646cf8a:34767 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;20680646cf8a:34767
2024-11-14T06:47:13,293 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/20680646cf8a,34767,1731566833231
2024-11-14T06:47:13,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-14T06:47:13,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45511-0x1003cfc6aac0001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-14T06:47:13,295 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/20680646cf8a,34767,1731566833231
2024-11-14T06:47:13,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45511-0x1003cfc6aac0001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-14T06:47:13,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-14T06:47:13,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45511-0x1003cfc6aac0001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-14T06:47:13,296 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-14T06:47:13,296 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/20680646cf8a,34767,1731566833231 from backup master directory
2024-11-14T06:47:13,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34767-0x1003cfc6aac0000,
quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/20680646cf8a,34767,1731566833231 2024-11-14T06:47:13,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T06:47:13,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45511-0x1003cfc6aac0001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T06:47:13,297 WARN [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T06:47:13,297 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=20680646cf8a,34767,1731566833231 2024-11-14T06:47:13,301 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/hbase.id] with ID: 47521f9f-14e9-411e-b2f5-e37a0c18d456 2024-11-14T06:47:13,301 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/.tmp/hbase.id 2024-11-14T06:47:13,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741826_1002 (size=42) 2024-11-14T06:47:13,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741826_1002 (size=42) 2024-11-14T06:47:13,307 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/.tmp/hbase.id]:[hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/hbase.id] 2024-11-14T06:47:13,317 INFO [master/20680646cf8a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:47:13,317 INFO [master/20680646cf8a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T06:47:13,318 INFO [master/20680646cf8a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-14T06:47:13,320 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45511-0x1003cfc6aac0001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:47:13,320 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:47:13,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741827_1003 (size=196) 2024-11-14T06:47:13,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741827_1003 (size=196) 2024-11-14T06:47:13,329 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T06:47:13,330 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T06:47:13,330 INFO [master/20680646cf8a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T06:47:13,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741828_1004 (size=1189) 2024-11-14T06:47:13,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741828_1004 (size=1189) 2024-11-14T06:47:13,338 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/data/master/store 2024-11-14T06:47:13,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741829_1005 (size=34) 2024-11-14T06:47:13,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741829_1005 (size=34) 2024-11-14T06:47:13,344 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:47:13,344 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T06:47:13,344 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:47:13,344 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:47:13,344 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T06:47:13,344 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:47:13,344 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T06:47:13,345 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731566833344Disabling compacts and flushes for region at 1731566833344Disabling writes for close at 1731566833344Writing region close event to WAL at 1731566833344Closed at 1731566833344 2024-11-14T06:47:13,345 WARN [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/data/master/store/.initializing 2024-11-14T06:47:13,345 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/WALs/20680646cf8a,34767,1731566833231 2024-11-14T06:47:13,347 INFO [master/20680646cf8a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=20680646cf8a%2C34767%2C1731566833231, suffix=, logDir=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/WALs/20680646cf8a,34767,1731566833231, archiveDir=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/oldWALs, maxLogs=10 2024-11-14T06:47:13,348 INFO [master/20680646cf8a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C34767%2C1731566833231.1731566833348 2024-11-14T06:47:13,352 INFO [master/20680646cf8a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/WALs/20680646cf8a,34767,1731566833231/20680646cf8a%2C34767%2C1731566833231.1731566833348 2024-11-14T06:47:13,353 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33507:33507),(127.0.0.1/127.0.0.1:33883:33883)] 2024-11-14T06:47:13,357 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T06:47:13,357 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:47:13,357 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:47:13,357 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:47:13,359 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:47:13,360 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T06:47:13,360 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:47:13,360 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:47:13,360 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:47:13,361 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T06:47:13,361 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:47:13,361 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:47:13,362 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:47:13,362 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T06:47:13,362 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:47:13,363 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:47:13,363 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:47:13,364 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T06:47:13,364 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:47:13,364 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:47:13,370 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:47:13,371 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:47:13,371 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:47:13,373 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:47:13,373 DEBUG [master/20680646cf8a:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:47:13,374 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T06:47:13,375 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:47:13,377 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T06:47:13,377 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=808468, jitterRate=0.0280207097530365}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T06:47:13,378 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731566833357Initializing all the Stores at 1731566833358 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566833358Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566833358Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566833358Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566833358Cleaning up temporary data from old regions at 1731566833373 (+15 ms)Region opened successfully at 1731566833378 (+5 ms) 2024-11-14T06:47:13,379 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T06:47:13,381 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4391e7de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=20680646cf8a/172.17.0.2:0 2024-11-14T06:47:13,382 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T06:47:13,383 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T06:47:13,383 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T06:47:13,383 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T06:47:13,383 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T06:47:13,384 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T06:47:13,384 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T06:47:13,386 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T06:47:13,387 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T06:47:13,388 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T06:47:13,388 INFO [master/20680646cf8a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T06:47:13,389 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T06:47:13,389 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T06:47:13,390 INFO [master/20680646cf8a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T06:47:13,391 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T06:47:13,392 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T06:47:13,392 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T06:47:13,394 DEBUG 
[master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T06:47:13,396 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T06:47:13,397 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T06:47:13,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45511-0x1003cfc6aac0001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T06:47:13,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T06:47:13,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45511-0x1003cfc6aac0001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:47:13,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:47:13,399 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=20680646cf8a,34767,1731566833231, sessionid=0x1003cfc6aac0000, setting cluster-up flag (Was=false) 2024-11-14T06:47:13,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:47:13,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45511-0x1003cfc6aac0001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:47:13,402 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T06:47:13,403 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=20680646cf8a,34767,1731566833231 2024-11-14T06:47:13,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45511-0x1003cfc6aac0001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:47:13,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:47:13,408 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T06:47:13,409 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=20680646cf8a,34767,1731566833231 2024-11-14T06:47:13,409 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T06:47:13,411 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T06:47:13,411 INFO [master/20680646cf8a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T06:47:13,411 INFO [master/20680646cf8a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T06:47:13,411 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 20680646cf8a,34767,1731566833231 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T06:47:13,412 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:47:13,412 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:47:13,412 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:47:13,413 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:47:13,413 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/20680646cf8a:0, corePoolSize=10, maxPoolSize=10 2024-11-14T06:47:13,413 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:47:13,413 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/20680646cf8a:0, corePoolSize=2, maxPoolSize=2 2024-11-14T06:47:13,413 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/20680646cf8a:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T06:47:13,413 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731566863413 2024-11-14T06:47:13,414 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T06:47:13,414 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T06:47:13,414 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T06:47:13,414 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T06:47:13,414 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T06:47:13,414 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T06:47:13,414 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T06:47:13,414 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T06:47:13,414 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T06:47:13,414 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T06:47:13,415 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T06:47:13,415 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T06:47:13,415 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.large.0-1731566833415,5,FailOnTimeoutGroup] 2024-11-14T06:47:13,415 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.small.0-1731566833415,5,FailOnTimeoutGroup] 2024-11-14T06:47:13,415 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T06:47:13,415 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T06:47:13,415 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
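The repeated "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" entries above come from HBase's ChoreService scheduling periodic background tasks (LogsCleaner, HFileCleaner, and so on). Purely as an illustrative sketch, not the master's actual wiring, a custom chore could be defined and scheduled roughly as below, assuming the public ScheduledChore/ChoreService API; the names HeartbeatChore, SimpleStopper, and "demo-chore" are invented for the example.

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreExample {
  /** Minimal Stoppable so the chore can be cancelled cleanly. */
  static class SimpleStopper implements Stoppable {
    private volatile boolean stopped;
    @Override public void stop(String why) { stopped = true; }
    @Override public boolean isStopped() { return stopped; }
  }

  /** Hypothetical chore that just prints a heartbeat; the real cleaners do file work here. */
  static class HeartbeatChore extends ScheduledChore {
    HeartbeatChore(Stoppable stopper, int periodMillis) {
      super("HeartbeatChore", stopper, periodMillis);
    }
    @Override protected void chore() {
      System.out.println("HeartbeatChore ran at " + System.currentTimeMillis());
    }
  }

  public static void main(String[] args) throws InterruptedException {
    ChoreService service = new ChoreService("demo-chore");    // thread-name prefix
    SimpleStopper stopper = new SimpleStopper();
    service.scheduleChore(new HeartbeatChore(stopper, 1000)); // period = 1000 ms
    Thread.sleep(5000);                                       // let it fire a few times
    stopper.stop("demo done");
    service.shutdown();
  }
}
```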
2024-11-14T06:47:13,415 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T06:47:13,415 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T06:47:13,415 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-14T06:47:13,416 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:47:13,416 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T06:47:13,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741831_1007 (size=1321) 2024-11-14T06:47:13,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741831_1007 (size=1321) 2024-11-14T06:47:13,424 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T06:47:13,425 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 
'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa 2024-11-14T06:47:13,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741832_1008 (size=32) 2024-11-14T06:47:13,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741832_1008 (size=32) 2024-11-14T06:47:13,433 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:47:13,437 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T06:47:13,438 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T06:47:13,438 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:47:13,439 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:47:13,439 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T06:47:13,440 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T06:47:13,440 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:47:13,440 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:47:13,440 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T06:47:13,441 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T06:47:13,441 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:47:13,442 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:47:13,442 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T06:47:13,443 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 
0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T06:47:13,443 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:47:13,444 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:47:13,444 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T06:47:13,445 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/hbase/meta/1588230740 2024-11-14T06:47:13,445 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/hbase/meta/1588230740 2024-11-14T06:47:13,446 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T06:47:13,446 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T06:47:13,447 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
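The FSTableDescriptors entry above prints the hbase:meta descriptor it writes out: families info, ns, rep_barrier, and table, all with ROW_INDEX_V1 encoding, ROWCOL bloom filters, in-memory caching, mostly 8 KB blocks, and the MultiRowMutationEndpoint coprocessor on the table. As a sketch only of how such a descriptor could be expressed with the public client API (the master builds this internally; the code below is not that code), using an invented table name "demo" and just the info-like family for brevity:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static void main(String[] args) throws IOException {
    // A family shaped like the meta 'info' family in the log:
    // ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks, 3 versions.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .setMaxVersions(3)
        .build();

    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))   // hypothetical table name for the example
        .setColumnFamily(info)
        // The log shows hbase:meta carrying this coprocessor on its descriptor.
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();

    System.out.println(td);
  }
}
```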
2024-11-14T06:47:13,448 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T06:47:13,449 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T06:47:13,450 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=811574, jitterRate=0.0319698303937912}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T06:47:13,450 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731566833434Initializing all the Stores at 1731566833434Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566833434Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566833437 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566833437Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566833437Cleaning up temporary data from old regions at 1731566833446 (+9 ms)Region opened successfully at 1731566833450 (+4 ms) 2024-11-14T06:47:13,450 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T06:47:13,450 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T06:47:13,450 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T06:47:13,450 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T06:47:13,450 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T06:47:13,451 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T06:47:13,451 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731566833450Disabling compacts and flushes for region at 1731566833450Disabling writes for close at 1731566833450Writing region close 
event to WAL at 1731566833451 (+1 ms)Closed at 1731566833451 2024-11-14T06:47:13,452 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T06:47:13,452 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T06:47:13,452 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T06:47:13,453 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T06:47:13,454 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T06:47:13,485 INFO [RS:0;20680646cf8a:45511 {}] regionserver.HRegionServer(746): ClusterId : 47521f9f-14e9-411e-b2f5-e37a0c18d456 2024-11-14T06:47:13,485 DEBUG [RS:0;20680646cf8a:45511 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T06:47:13,487 DEBUG [RS:0;20680646cf8a:45511 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T06:47:13,487 DEBUG [RS:0;20680646cf8a:45511 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T06:47:13,488 DEBUG [RS:0;20680646cf8a:45511 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T06:47:13,488 DEBUG [RS:0;20680646cf8a:45511 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17e75ea8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=20680646cf8a/172.17.0.2:0 2024-11-14T06:47:13,498 DEBUG [RS:0;20680646cf8a:45511 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;20680646cf8a:45511 2024-11-14T06:47:13,498 INFO [RS:0;20680646cf8a:45511 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T06:47:13,498 INFO [RS:0;20680646cf8a:45511 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T06:47:13,498 DEBUG [RS:0;20680646cf8a:45511 {}] regionserver.HRegionServer(832): About to register with Master. 
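The "Opened 1588230740" entry above reports a ConstantSizeRegionSplitPolicy with desiredMaxFileSize=811574 and jitterRate=0.0319698..., and the later meta open in this log reports desiredMaxFileSize=704440 with jitterRate=-0.10426.... Assuming the jitter is applied multiplicatively to a configured base max file size (which appears to be 786432 bytes, i.e. 768 KB, for this test), both logged figures check out; a small sanity check with that assumed relation:

```java
public class SplitJitterCheck {
  public static void main(String[] args) {
    long base = 786_432L;                       // assumed configured max file size (768 KB) in this test
    double jitter1 = 0.0319698303937912;        // from the first open in the log
    double jitter2 = -0.1042587012052536;       // from the later meta open in the log
    // Assumed relation: desiredMaxFileSize = base * (1 + jitterRate)
    System.out.println(Math.round(base * (1 + jitter1)));  // 811574, matches the first open
    System.out.println(Math.round(base * (1 + jitter2)));  // 704440, matches the second open
  }
}
```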
2024-11-14T06:47:13,499 INFO [RS:0;20680646cf8a:45511 {}] regionserver.HRegionServer(2659): reportForDuty to master=20680646cf8a,34767,1731566833231 with port=45511, startcode=1731566833269 2024-11-14T06:47:13,499 DEBUG [RS:0;20680646cf8a:45511 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T06:47:13,501 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52285, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T06:47:13,501 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34767 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 20680646cf8a,45511,1731566833269 2024-11-14T06:47:13,501 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34767 {}] master.ServerManager(517): Registering regionserver=20680646cf8a,45511,1731566833269 2024-11-14T06:47:13,503 DEBUG [RS:0;20680646cf8a:45511 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa 2024-11-14T06:47:13,503 DEBUG [RS:0;20680646cf8a:45511 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37505 2024-11-14T06:47:13,503 DEBUG [RS:0;20680646cf8a:45511 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T06:47:13,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T06:47:13,505 DEBUG [RS:0;20680646cf8a:45511 {}] zookeeper.ZKUtil(111): regionserver:45511-0x1003cfc6aac0001, quorum=127.0.0.1:52340, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/20680646cf8a,45511,1731566833269 2024-11-14T06:47:13,505 WARN [RS:0;20680646cf8a:45511 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T06:47:13,505 INFO [RS:0;20680646cf8a:45511 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T06:47:13,505 DEBUG [RS:0;20680646cf8a:45511 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/WALs/20680646cf8a,45511,1731566833269 2024-11-14T06:47:13,505 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [20680646cf8a,45511,1731566833269] 2024-11-14T06:47:13,509 INFO [RS:0;20680646cf8a:45511 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T06:47:13,511 INFO [RS:0;20680646cf8a:45511 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T06:47:13,511 INFO [RS:0;20680646cf8a:45511 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T06:47:13,511 INFO [RS:0;20680646cf8a:45511 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
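The MemStoreFlusher entry above reports globalMemStoreLimit=880 M with a low-water mark of 836 M. Assuming the stock defaults (the limit is heap size times hbase.regionserver.global.memstore.size, default 0.4, and the low mark is that limit times hbase.regionserver.global.memstore.size.lower.limit, default 0.95), those numbers imply roughly a 2.2 GB heap for the test region server; a quick check under those assumptions:

```java
public class MemstoreLimitCheck {
  public static void main(String[] args) {
    double heapMB = 2200.0;          // implied heap size (an inference, not printed in the log)
    double globalFraction = 0.4;     // hbase.regionserver.global.memstore.size default
    double lowerFraction = 0.95;     // hbase.regionserver.global.memstore.size.lower.limit default
    double limit = heapMB * globalFraction;   // 880 MB, as logged
    double lowMark = limit * lowerFraction;   // 836 MB, as logged
    System.out.printf("limit=%.0f MB, lowMark=%.0f MB%n", limit, lowMark);
  }
}
```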
2024-11-14T06:47:13,511 INFO [RS:0;20680646cf8a:45511 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T06:47:13,512 INFO [RS:0;20680646cf8a:45511 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T06:47:13,512 INFO [RS:0;20680646cf8a:45511 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T06:47:13,512 DEBUG [RS:0;20680646cf8a:45511 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:47:13,512 DEBUG [RS:0;20680646cf8a:45511 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:47:13,512 DEBUG [RS:0;20680646cf8a:45511 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:47:13,512 DEBUG [RS:0;20680646cf8a:45511 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:47:13,512 DEBUG [RS:0;20680646cf8a:45511 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:47:13,513 DEBUG [RS:0;20680646cf8a:45511 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/20680646cf8a:0, corePoolSize=2, maxPoolSize=2 2024-11-14T06:47:13,513 DEBUG [RS:0;20680646cf8a:45511 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:47:13,513 DEBUG [RS:0;20680646cf8a:45511 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:47:13,513 DEBUG [RS:0;20680646cf8a:45511 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:47:13,513 DEBUG [RS:0;20680646cf8a:45511 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:47:13,513 DEBUG [RS:0;20680646cf8a:45511 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:47:13,513 DEBUG [RS:0;20680646cf8a:45511 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:47:13,513 DEBUG [RS:0;20680646cf8a:45511 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/20680646cf8a:0, corePoolSize=3, maxPoolSize=3 2024-11-14T06:47:13,513 DEBUG [RS:0;20680646cf8a:45511 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0, corePoolSize=3, maxPoolSize=3 2024-11-14T06:47:13,513 INFO [RS:0;20680646cf8a:45511 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
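The PressureAwareCompactionThroughputController line a little above (higher bound 100 MB/s, lower bound 50 MB/s, tuning period 60000 ms, with the CompactionThroughputTuner chore enabled) reflects tunable throughput limits. Sketch only; the property keys below are assumptions from memory rather than values verified against this build:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed keys for the bounds logged above; values are bytes per second.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024); // 100 MB/s
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound",   50L * 1024 * 1024); //  50 MB/s
    conf.setInt("hbase.hstore.compaction.throughput.tune.period", 60_000);               // tuning period, ms
    System.out.println(conf.get("hbase.hstore.compaction.throughput.higher.bound"));
  }
}
```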
2024-11-14T06:47:13,513 INFO [RS:0;20680646cf8a:45511 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T06:47:13,514 INFO [RS:0;20680646cf8a:45511 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T06:47:13,514 INFO [RS:0;20680646cf8a:45511 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T06:47:13,514 INFO [RS:0;20680646cf8a:45511 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T06:47:13,514 INFO [RS:0;20680646cf8a:45511 {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,45511,1731566833269-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T06:47:13,529 INFO [RS:0;20680646cf8a:45511 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T06:47:13,529 INFO [RS:0;20680646cf8a:45511 {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,45511,1731566833269-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T06:47:13,529 INFO [RS:0;20680646cf8a:45511 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:47:13,529 INFO [RS:0;20680646cf8a:45511 {}] regionserver.Replication(171): 20680646cf8a,45511,1731566833269 started 2024-11-14T06:47:13,541 INFO [RS:0;20680646cf8a:45511 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:47:13,541 INFO [RS:0;20680646cf8a:45511 {}] regionserver.HRegionServer(1482): Serving as 20680646cf8a,45511,1731566833269, RpcServer on 20680646cf8a/172.17.0.2:45511, sessionid=0x1003cfc6aac0001 2024-11-14T06:47:13,541 DEBUG [RS:0;20680646cf8a:45511 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T06:47:13,541 DEBUG [RS:0;20680646cf8a:45511 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 20680646cf8a,45511,1731566833269 2024-11-14T06:47:13,541 DEBUG [RS:0;20680646cf8a:45511 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '20680646cf8a,45511,1731566833269' 2024-11-14T06:47:13,541 DEBUG [RS:0;20680646cf8a:45511 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T06:47:13,542 DEBUG [RS:0;20680646cf8a:45511 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T06:47:13,542 DEBUG [RS:0;20680646cf8a:45511 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T06:47:13,542 DEBUG [RS:0;20680646cf8a:45511 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T06:47:13,542 DEBUG [RS:0;20680646cf8a:45511 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 20680646cf8a,45511,1731566833269 2024-11-14T06:47:13,542 DEBUG [RS:0;20680646cf8a:45511 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '20680646cf8a,45511,1731566833269' 2024-11-14T06:47:13,542 DEBUG [RS:0;20680646cf8a:45511 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T06:47:13,543 DEBUG 
[RS:0;20680646cf8a:45511 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T06:47:13,543 DEBUG [RS:0;20680646cf8a:45511 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T06:47:13,543 INFO [RS:0;20680646cf8a:45511 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T06:47:13,543 INFO [RS:0;20680646cf8a:45511 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T06:47:13,605 WARN [20680646cf8a:34767 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-14T06:47:13,647 INFO [RS:0;20680646cf8a:45511 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=20680646cf8a%2C45511%2C1731566833269, suffix=, logDir=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/WALs/20680646cf8a,45511,1731566833269, archiveDir=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/oldWALs, maxLogs=32 2024-11-14T06:47:13,648 INFO [RS:0;20680646cf8a:45511 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C45511%2C1731566833269.1731566833648 2024-11-14T06:47:13,661 INFO [RS:0;20680646cf8a:45511 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/WALs/20680646cf8a,45511,1731566833269/20680646cf8a%2C45511%2C1731566833269.1731566833648 2024-11-14T06:47:13,663 DEBUG [RS:0;20680646cf8a:45511 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33507:33507),(127.0.0.1/127.0.0.1:33883:33883)] 2024-11-14T06:47:13,855 DEBUG [20680646cf8a:34767 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T06:47:13,856 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=20680646cf8a,45511,1731566833269 2024-11-14T06:47:13,859 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 20680646cf8a,45511,1731566833269, state=OPENING 2024-11-14T06:47:13,861 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T06:47:13,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:47:13,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45511-0x1003cfc6aac0001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:47:13,864 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T06:47:13,864 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:47:13,864 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:47:13,865 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=20680646cf8a,45511,1731566833269}] 2024-11-14T06:47:14,019 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T06:47:14,021 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58229, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T06:47:14,024 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T06:47:14,024 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T06:47:14,026 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=20680646cf8a%2C45511%2C1731566833269.meta, suffix=.meta, logDir=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/WALs/20680646cf8a,45511,1731566833269, archiveDir=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/oldWALs, maxLogs=32 2024-11-14T06:47:14,026 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C45511%2C1731566833269.meta.1731566834026.meta 2024-11-14T06:47:14,031 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/WALs/20680646cf8a,45511,1731566833269/20680646cf8a%2C45511%2C1731566833269.meta.1731566834026.meta 2024-11-14T06:47:14,033 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33883:33883),(127.0.0.1/127.0.0.1:33507:33507)] 2024-11-14T06:47:14,034 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T06:47:14,034 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T06:47:14,034 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T06:47:14,034 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
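The two "WAL configuration" entries above show blocksize=256 MB and rollsize=128 MB for both the region server WAL and the meta WAL, with maxLogs=32. Assuming the usual relationship (roll size is the block size times hbase.regionserver.logroll.multiplier, default 0.5, and maxLogs comes from hbase.regionserver.maxlogs), the logged values are consistent; a one-line check under that assumption:

```java
public class WalRollSizeCheck {
  public static void main(String[] args) {
    long blockSize = 256L * 1024 * 1024;   // 256 MB, as logged
    double multiplier = 0.5;               // assumed hbase.regionserver.logroll.multiplier default
    long rollSize = (long) (blockSize * multiplier);
    System.out.println(rollSize == 128L * 1024 * 1024);  // true: matches the logged 128 MB roll size
  }
}
```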
2024-11-14T06:47:14,034 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T06:47:14,035 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:47:14,035 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T06:47:14,035 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T06:47:14,036 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T06:47:14,037 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T06:47:14,037 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:47:14,038 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:47:14,038 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T06:47:14,038 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T06:47:14,038 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:47:14,039 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:47:14,039 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T06:47:14,040 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T06:47:14,040 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:47:14,040 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:47:14,040 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T06:47:14,041 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T06:47:14,041 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:47:14,041 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-14T06:47:14,041 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T06:47:14,042 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/hbase/meta/1588230740 2024-11-14T06:47:14,043 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/hbase/meta/1588230740 2024-11-14T06:47:14,044 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T06:47:14,044 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T06:47:14,044 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T06:47:14,045 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T06:47:14,046 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=704440, jitterRate=-0.1042587012052536}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T06:47:14,046 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T06:47:14,047 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731566834035Writing region info on filesystem at 1731566834035Initializing all the Stores at 1731566834036 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566834036Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566834036Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566834036Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566834036Cleaning up temporary data from old regions at 1731566834044 (+8 ms)Running coprocessor post-open hooks at 1731566834046 (+2 ms)Region opened successfully at 1731566834047 (+1 ms) 2024-11-14T06:47:14,048 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731566834019 2024-11-14T06:47:14,050 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T06:47:14,050 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T06:47:14,051 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=20680646cf8a,45511,1731566833269 2024-11-14T06:47:14,052 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 20680646cf8a,45511,1731566833269, state=OPEN 2024-11-14T06:47:14,054 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=20680646cf8a,45511,1731566833269 2024-11-14T06:47:14,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45511-0x1003cfc6aac0001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T06:47:14,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T06:47:14,054 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:47:14,054 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:47:14,057 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T06:47:14,057 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=20680646cf8a,45511,1731566833269 in 190 msec 2024-11-14T06:47:14,059 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T06:47:14,059 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 605 msec 2024-11-14T06:47:14,060 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T06:47:14,060 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T06:47:14,061 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T06:47:14,061 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=20680646cf8a,45511,1731566833269, seqNum=-1] 2024-11-14T06:47:14,062 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T06:47:14,063 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44657, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T06:47:14,068 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 656 msec 2024-11-14T06:47:14,068 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731566834068, completionTime=-1 2024-11-14T06:47:14,068 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T06:47:14,068 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-14T06:47:14,070 INFO [master/20680646cf8a:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-14T06:47:14,070 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731566894070 2024-11-14T06:47:14,070 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731566954070 2024-11-14T06:47:14,071 INFO [master/20680646cf8a:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-14T06:47:14,071 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,34767,1731566833231-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T06:47:14,071 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,34767,1731566833231-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:47:14,071 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,34767,1731566833231-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:47:14,071 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-20680646cf8a:34767, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T06:47:14,071 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-11-14T06:47:14,071 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled.
2024-11-14T06:47:14,073 DEBUG [master/20680646cf8a:0.Chore.1 {}] janitor.CatalogJanitor(180):
2024-11-14T06:47:14,075 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.778sec
2024-11-14T06:47:14,075 INFO [master/20680646cf8a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-11-14T06:47:14,075 INFO [master/20680646cf8a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-11-14T06:47:14,075 INFO [master/20680646cf8a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-11-14T06:47:14,075 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-11-14T06:47:14,075 INFO [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-11-14T06:47:14,075 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,34767,1731566833231-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-14T06:47:14,075 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,34767,1731566833231-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-11-14T06:47:14,078 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds
2024-11-14T06:47:14,078 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-11-14T06:47:14,078 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,34767,1731566833231-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
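The records above show the embedded master finishing initialization and enabling its maintenance chores; the records that follow report the minicluster being up and the test client connecting to it. For orientation only, a minimal sketch of how such a single-node minicluster is typically started and stopped from test code with HBaseTestingUtil (the utility class named in the log); the exact options this particular test passes are assumptions, not taken from the log.

import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterSketch {
  // Minimal sketch, assuming default configuration: start a one-master,
  // one-regionserver minicluster (ZooKeeper and a mini-DFS included),
  // run the test body, then tear everything down again.
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster();
    try {
      // test body would run against the cluster here, e.g. via util.getConnection()
    } finally {
      util.shutdownMiniCluster();
    }
  }
}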
2024-11-14T06:47:14,085 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ed5ac32, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T06:47:14,085 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 20680646cf8a,34767,-1 for getting cluster id 2024-11-14T06:47:14,085 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T06:47:14,086 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '47521f9f-14e9-411e-b2f5-e37a0c18d456' 2024-11-14T06:47:14,087 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T06:47:14,087 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "47521f9f-14e9-411e-b2f5-e37a0c18d456" 2024-11-14T06:47:14,087 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d9aeba1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T06:47:14,087 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [20680646cf8a,34767,-1] 2024-11-14T06:47:14,087 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T06:47:14,088 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:47:14,089 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35248, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T06:47:14,090 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c7d522f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T06:47:14,090 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T06:47:14,091 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=20680646cf8a,45511,1731566833269, seqNum=-1] 2024-11-14T06:47:14,091 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T06:47:14,092 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56478, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T06:47:14,094 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=20680646cf8a,34767,1731566833231 2024-11-14T06:47:14,094 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:47:14,097 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T06:47:14,097 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-14T06:47:14,098 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 20680646cf8a,34767,1731566833231 2024-11-14T06:47:14,098 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4442d12d 2024-11-14T06:47:14,099 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-14T06:47:14,100 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35256, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-14T06:47:14,100 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-14T06:47:14,100 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-14T06:47:14,101 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T06:47:14,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T06:47:14,103 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-14T06:47:14,103 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:47:14,103 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-14T06:47:14,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T06:47:14,104 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-14T06:47:14,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741835_1011 (size=405) 2024-11-14T06:47:14,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741835_1011 (size=405) 2024-11-14T06:47:14,111 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 166f46e0191977f403f01f6b307e4b9c, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa 2024-11-14T06:47:14,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741836_1012 (size=88) 2024-11-14T06:47:14,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741836_1012 (size=88) 2024-11-14T06:47:14,117 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:47:14,118 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 166f46e0191977f403f01f6b307e4b9c, disabling compactions & flushes 2024-11-14T06:47:14,118 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c. 2024-11-14T06:47:14,118 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c. 2024-11-14T06:47:14,118 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c. after waiting 0 ms 2024-11-14T06:47:14,118 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c. 
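The records above trace the master handling a create-table request for 'TestLogRolling-testCompactionRecordDoesntBlockRolling' with a single 'info' family (CreateTableProcedure pid=4, followed below by region creation and assignment). As a hedged sketch, this is roughly the client-side call that produces such a request through the public Admin API; the connection setup is an assumption, and the deliberately small flush-size and max-filesize overrides the test uses are not reproduced here.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    // Assumes an hbase-site.xml on the classpath pointing at the running cluster.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName name = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
      // One column family 'info' with a single version, matching the descriptor in the log.
      admin.createTable(TableDescriptorBuilder.newBuilder(name)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)
              .build())
          .build());
      // The master turns this RPC into the CreateTableProcedure logged above (pid=4).
    }
  }
}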
2024-11-14T06:47:14,118 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c. 2024-11-14T06:47:14,118 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 166f46e0191977f403f01f6b307e4b9c: Waiting for close lock at 1731566834118Disabling compacts and flushes for region at 1731566834118Disabling writes for close at 1731566834118Writing region close event to WAL at 1731566834118Closed at 1731566834118 2024-11-14T06:47:14,119 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-14T06:47:14,119 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731566834119"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731566834119"}]},"ts":"1731566834119"} 2024-11-14T06:47:14,121 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-14T06:47:14,122 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-14T06:47:14,122 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731566834122"}]},"ts":"1731566834122"} 2024-11-14T06:47:14,124 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-14T06:47:14,124 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=166f46e0191977f403f01f6b307e4b9c, ASSIGN}] 2024-11-14T06:47:14,126 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=166f46e0191977f403f01f6b307e4b9c, ASSIGN 2024-11-14T06:47:14,127 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=166f46e0191977f403f01f6b307e4b9c, ASSIGN; state=OFFLINE, location=20680646cf8a,45511,1731566833269; forceNewPlan=false, retain=false 2024-11-14T06:47:14,278 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=166f46e0191977f403f01f6b307e4b9c, regionState=OPENING, regionLocation=20680646cf8a,45511,1731566833269 2024-11-14T06:47:14,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:14,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:14,285 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=166f46e0191977f403f01f6b307e4b9c, ASSIGN because future has completed 2024-11-14T06:47:14,286 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 166f46e0191977f403f01f6b307e4b9c, server=20680646cf8a,45511,1731566833269}] 2024-11-14T06:47:14,450 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c. 
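The records that follow trace OpenRegionProcedure pid=6 opening the new region on the regionserver; once it is reported OPEN, the table is writable from a client. A small sketch of such a write, under the same assumed connection as the create-table sketch earlier; row key, qualifier and value are placeholders for illustration only.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutSketch {
  // Writes one cell into the 'info' family of the newly assigned table.
  static void writeOneRow(Connection conn) throws Exception {
    TableName name = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (Table table = conn.getTable(name)) {
      Put put = new Put(Bytes.toBytes("row-1"));
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      table.put(put);
    }
  }
}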
2024-11-14T06:47:14,450 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 166f46e0191977f403f01f6b307e4b9c, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c.', STARTKEY => '', ENDKEY => ''} 2024-11-14T06:47:14,451 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 166f46e0191977f403f01f6b307e4b9c 2024-11-14T06:47:14,451 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:47:14,451 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 166f46e0191977f403f01f6b307e4b9c 2024-11-14T06:47:14,451 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 166f46e0191977f403f01f6b307e4b9c 2024-11-14T06:47:14,454 INFO [StoreOpener-166f46e0191977f403f01f6b307e4b9c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 166f46e0191977f403f01f6b307e4b9c 2024-11-14T06:47:14,457 INFO [StoreOpener-166f46e0191977f403f01f6b307e4b9c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 166f46e0191977f403f01f6b307e4b9c columnFamilyName info 2024-11-14T06:47:14,457 DEBUG [StoreOpener-166f46e0191977f403f01f6b307e4b9c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:47:14,458 INFO [StoreOpener-166f46e0191977f403f01f6b307e4b9c-1 {}] regionserver.HStore(327): Store=166f46e0191977f403f01f6b307e4b9c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:47:14,458 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 166f46e0191977f403f01f6b307e4b9c 2024-11-14T06:47:14,460 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c 2024-11-14T06:47:14,461 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c 2024-11-14T06:47:14,461 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 166f46e0191977f403f01f6b307e4b9c 2024-11-14T06:47:14,461 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 166f46e0191977f403f01f6b307e4b9c 2024-11-14T06:47:14,464 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 166f46e0191977f403f01f6b307e4b9c 2024-11-14T06:47:14,467 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T06:47:14,468 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 166f46e0191977f403f01f6b307e4b9c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=773382, jitterRate=-0.01659516990184784}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T06:47:14,468 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 166f46e0191977f403f01f6b307e4b9c 2024-11-14T06:47:14,469 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 166f46e0191977f403f01f6b307e4b9c: Running coprocessor pre-open hook at 1731566834452Writing region info on filesystem at 1731566834452Initializing all the Stores at 1731566834453 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566834453Cleaning up temporary data from old regions at 1731566834461 (+8 ms)Running coprocessor post-open hooks at 1731566834468 (+7 ms)Region opened successfully at 1731566834469 (+1 ms) 2024-11-14T06:47:14,470 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c., pid=6, masterSystemTime=1731566834442 2024-11-14T06:47:14,472 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c. 2024-11-14T06:47:14,472 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c. 2024-11-14T06:47:14,473 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=166f46e0191977f403f01f6b307e4b9c, regionState=OPEN, openSeqNum=2, regionLocation=20680646cf8a,45511,1731566833269 2024-11-14T06:47:14,475 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 166f46e0191977f403f01f6b307e4b9c, server=20680646cf8a,45511,1731566833269 because future has completed 2024-11-14T06:47:14,479 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-14T06:47:14,479 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 166f46e0191977f403f01f6b307e4b9c, server=20680646cf8a,45511,1731566833269 in 190 msec 2024-11-14T06:47:14,482 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-14T06:47:14,482 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=166f46e0191977f403f01f6b307e4b9c, ASSIGN in 355 msec 2024-11-14T06:47:14,483 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-14T06:47:14,483 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731566834483"}]},"ts":"1731566834483"} 2024-11-14T06:47:14,485 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-14T06:47:14,486 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-14T06:47:14,488 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 385 msec 2024-11-14T06:47:14,528 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T06:47:14,528 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-14T06:47:14,529 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the 
MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-14T06:47:15,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:15,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:16,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:16,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:17,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:17,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:18,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:18,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:19,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:19,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:19,540 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T06:47:19,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:19,543 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:19,544 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:19,544 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:19,545 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:19,546 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:19,566 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:19,566 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:19,566 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:19,567 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:19,567 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:19,567 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:19,570 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:19,571 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:19,571 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:19,573 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:47:19,578 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T06:47:19,579 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-14T06:47:20,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:20,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:21,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:21,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:22,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:22,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:23,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:23,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:24,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T06:47:24,146 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-14T06:47:24,146 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-14T06:47:24,154 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T06:47:24,154 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c. 2024-11-14T06:47:24,158 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c., hostname=20680646cf8a,45511,1731566833269, seqNum=2] 2024-11-14T06:47:24,167 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T06:47:24,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T06:47:24,172 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-14T06:47:24,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-14T06:47:24,173 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-14T06:47:24,175 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-14T06:47:24,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed 
invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:24,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:24,339 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45511 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-14T06:47:24,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c. 2024-11-14T06:47:24,340 INFO [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 166f46e0191977f403f01f6b307e4b9c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-14T06:47:24,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/.tmp/info/76a4cd729b4f425e97756bb3b955729a is 1080, key is row0001/info:/1731566844160/Put/seqid=0 2024-11-14T06:47:24,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741837_1013 (size=6033) 2024-11-14T06:47:24,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741837_1013 (size=6033) 2024-11-14T06:47:24,369 INFO [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/.tmp/info/76a4cd729b4f425e97756bb3b955729a 2024-11-14T06:47:24,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/.tmp/info/76a4cd729b4f425e97756bb3b955729a as 
hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/info/76a4cd729b4f425e97756bb3b955729a 2024-11-14T06:47:24,380 INFO [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/info/76a4cd729b4f425e97756bb3b955729a, entries=1, sequenceid=5, filesize=5.9 K 2024-11-14T06:47:24,381 INFO [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 166f46e0191977f403f01f6b307e4b9c in 41ms, sequenceid=5, compaction requested=false 2024-11-14T06:47:24,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 166f46e0191977f403f01f6b307e4b9c: 2024-11-14T06:47:24,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c. 2024-11-14T06:47:24,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-14T06:47:24,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-14T06:47:24,389 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-14T06:47:24,389 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 211 msec 2024-11-14T06:47:24,391 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 222 msec 2024-11-14T06:47:25,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:25,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:26,299 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:26,299 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:27,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:27,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:28,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:28,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:29,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:29,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:30,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:30,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:31,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:31,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:32,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:32,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:33,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:33,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:47:34,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-14T06:47:34,256 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-14T06:47:34,264 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T06:47:34,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T06:47:34,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-14T06:47:34,267 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-14T06:47:34,267 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-14T06:47:34,268 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-14T06:47:34,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:34,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:47:34,422 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45511 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-14T06:47:34,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c. 2024-11-14T06:47:34,424 INFO [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 166f46e0191977f403f01f6b307e4b9c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-14T06:47:34,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/.tmp/info/c77fb701772847d3a37350905227497a is 1080, key is row0002/info:/1731566854259/Put/seqid=0 2024-11-14T06:47:34,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741838_1014 (size=6033) 2024-11-14T06:47:34,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741838_1014 (size=6033) 2024-11-14T06:47:34,443 INFO [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/.tmp/info/c77fb701772847d3a37350905227497a 2024-11-14T06:47:34,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/.tmp/info/c77fb701772847d3a37350905227497a as hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/info/c77fb701772847d3a37350905227497a 2024-11-14T06:47:34,458 INFO [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/info/c77fb701772847d3a37350905227497a, entries=1, sequenceid=9, filesize=5.9 K 2024-11-14T06:47:34,459 INFO [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 166f46e0191977f403f01f6b307e4b9c in 36ms, sequenceid=9, compaction requested=false 2024-11-14T06:47:34,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): 
Flush status journal for 166f46e0191977f403f01f6b307e4b9c: 2024-11-14T06:47:34,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c. 2024-11-14T06:47:34,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-14T06:47:34,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-14T06:47:34,464 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-14T06:47:34,464 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 193 msec 2024-11-14T06:47:34,466 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 201 msec 2024-11-14T06:47:35,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:35,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:36,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:36,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:37,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:37,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:38,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:38,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:39,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:39,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:39,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 after 68098ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor205.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:47:39,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta after 68082ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor205.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T06:47:40,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:40,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:41,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:41,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:42,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:42,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:43,212 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T06:47:43,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:43,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:44,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:44,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:44,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-14T06:47:44,366 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-14T06:47:44,373 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C45511%2C1731566833269.1731566864373 2024-11-14T06:47:44,386 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:44,386 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:44,386 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:44,387 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:44,387 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:44,387 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/WALs/20680646cf8a,45511,1731566833269/20680646cf8a%2C45511%2C1731566833269.1731566833648 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/WALs/20680646cf8a,45511,1731566833269/20680646cf8a%2C45511%2C1731566833269.1731566864373 2024-11-14T06:47:44,388 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33507:33507),(127.0.0.1/127.0.0.1:33883:33883)] 2024-11-14T06:47:44,388 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/WALs/20680646cf8a,45511,1731566833269/20680646cf8a%2C45511%2C1731566833269.1731566833648 is not closed yet, will try archiving it next time 2024-11-14T06:47:44,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741833_1009 (size=5546) 2024-11-14T06:47:44,389 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T06:47:44,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741833_1009 (size=5546) 2024-11-14T06:47:44,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T06:47:44,391 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-14T06:47:44,391 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-14T06:47:44,392 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-14T06:47:44,392 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-14T06:47:44,546 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45511 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-14T06:47:44,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c. 2024-11-14T06:47:44,547 INFO [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 166f46e0191977f403f01f6b307e4b9c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-14T06:47:44,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/.tmp/info/6423ce53d7fa432fa4b9bd3fd9ceb7e4 is 1080, key is row0003/info:/1731566864369/Put/seqid=0 2024-11-14T06:47:44,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741840_1016 (size=6033) 2024-11-14T06:47:44,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741840_1016 (size=6033) 2024-11-14T06:47:44,562 INFO [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/.tmp/info/6423ce53d7fa432fa4b9bd3fd9ceb7e4 2024-11-14T06:47:44,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/.tmp/info/6423ce53d7fa432fa4b9bd3fd9ceb7e4 as 
hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/info/6423ce53d7fa432fa4b9bd3fd9ceb7e4 2024-11-14T06:47:44,575 INFO [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/info/6423ce53d7fa432fa4b9bd3fd9ceb7e4, entries=1, sequenceid=13, filesize=5.9 K 2024-11-14T06:47:44,576 INFO [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 166f46e0191977f403f01f6b307e4b9c in 29ms, sequenceid=13, compaction requested=true 2024-11-14T06:47:44,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 166f46e0191977f403f01f6b307e4b9c: 2024-11-14T06:47:44,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c. 2024-11-14T06:47:44,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-14T06:47:44,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-14T06:47:44,581 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-14T06:47:44,581 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 186 msec 2024-11-14T06:47:44,584 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 193 msec 2024-11-14T06:47:45,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:45,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:46,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:46,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:47,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:47,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:48,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:48,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:49,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:49,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:50,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:50,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:51,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:51,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:52,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:52,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:53,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:53,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:54,088 INFO [master/20680646cf8a:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-14T06:47:54,088 INFO [master/20680646cf8a:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-14T06:47:54,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:54,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:54,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-14T06:47:54,446 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-14T06:47:54,446 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T06:47:54,450 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T06:47:54,451 DEBUG [Time-limited test {}] regionserver.HStore(1541): 166f46e0191977f403f01f6b307e4b9c/info is initiating minor compaction (all files) 2024-11-14T06:47:54,451 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T06:47:54,451 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T06:47:54,452 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 166f46e0191977f403f01f6b307e4b9c/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c. 
2024-11-14T06:47:54,452 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/info/76a4cd729b4f425e97756bb3b955729a, hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/info/c77fb701772847d3a37350905227497a, hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/info/6423ce53d7fa432fa4b9bd3fd9ceb7e4] into tmpdir=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/.tmp, totalSize=17.7 K 2024-11-14T06:47:54,454 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 76a4cd729b4f425e97756bb3b955729a, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731566844160 2024-11-14T06:47:54,455 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting c77fb701772847d3a37350905227497a, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731566854259 2024-11-14T06:47:54,456 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 6423ce53d7fa432fa4b9bd3fd9ceb7e4, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731566864369 2024-11-14T06:47:54,466 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 166f46e0191977f403f01f6b307e4b9c#info#compaction#46 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T06:47:54,466 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/.tmp/info/04d12e181c6d4194845a208ec3dc8533 is 1080, key is row0001/info:/1731566844160/Put/seqid=0 2024-11-14T06:47:54,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741841_1017 (size=8296) 2024-11-14T06:47:54,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741841_1017 (size=8296) 2024-11-14T06:47:54,478 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/.tmp/info/04d12e181c6d4194845a208ec3dc8533 as hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/info/04d12e181c6d4194845a208ec3dc8533 2024-11-14T06:47:54,483 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 166f46e0191977f403f01f6b307e4b9c/info of 166f46e0191977f403f01f6b307e4b9c into 04d12e181c6d4194845a208ec3dc8533(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T06:47:54,483 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 166f46e0191977f403f01f6b307e4b9c: 2024-11-14T06:47:54,486 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C45511%2C1731566833269.1731566874485 2024-11-14T06:47:54,492 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:54,492 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:54,492 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:54,492 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:54,492 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:47:54,493 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/WALs/20680646cf8a,45511,1731566833269/20680646cf8a%2C45511%2C1731566833269.1731566864373 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/WALs/20680646cf8a,45511,1731566833269/20680646cf8a%2C45511%2C1731566833269.1731566874485 2024-11-14T06:47:54,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741839_1015 (size=2520) 2024-11-14T06:47:54,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741839_1015 (size=2520) 2024-11-14T06:47:54,502 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/WALs/20680646cf8a,45511,1731566833269/20680646cf8a%2C45511%2C1731566833269.1731566833648 to 
hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/oldWALs/20680646cf8a%2C45511%2C1731566833269.1731566833648 2024-11-14T06:47:54,502 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33883:33883),(127.0.0.1/127.0.0.1:33507:33507)] 2024-11-14T06:47:54,502 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T06:47:54,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T06:47:54,505 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-14T06:47:54,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-14T06:47:54,506 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-14T06:47:54,506 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-14T06:47:54,658 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45511 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-14T06:47:54,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c. 
2024-11-14T06:47:54,659 INFO [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 166f46e0191977f403f01f6b307e4b9c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-14T06:47:54,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/.tmp/info/857fcfe9c3514c01a07526284fac0ac2 is 1080, key is row0000/info:/1731566874484/Put/seqid=0 2024-11-14T06:47:54,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741843_1019 (size=6033) 2024-11-14T06:47:54,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741843_1019 (size=6033) 2024-11-14T06:47:54,672 INFO [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/.tmp/info/857fcfe9c3514c01a07526284fac0ac2 2024-11-14T06:47:54,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/.tmp/info/857fcfe9c3514c01a07526284fac0ac2 as hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/info/857fcfe9c3514c01a07526284fac0ac2 2024-11-14T06:47:54,687 INFO [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/info/857fcfe9c3514c01a07526284fac0ac2, entries=1, sequenceid=18, filesize=5.9 K 2024-11-14T06:47:54,689 INFO [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 166f46e0191977f403f01f6b307e4b9c in 30ms, sequenceid=18, compaction requested=false 2024-11-14T06:47:54,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 166f46e0191977f403f01f6b307e4b9c: 2024-11-14T06:47:54,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c. 
2024-11-14T06:47:54,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-14T06:47:54,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-14T06:47:54,693 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-14T06:47:54,693 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 185 msec 2024-11-14T06:47:54,696 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 191 msec 2024-11-14T06:47:55,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:47:55,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:56,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:56,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:47:57,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:57,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:58,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:47:58,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:59,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:47:59,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:47:59,451 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 166f46e0191977f403f01f6b307e4b9c, had cached 0 bytes from a total of 14329 
[The WARN util.RecoverLeaseFSUtils(258) "Failed invocation ... java.lang.reflect.InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" event above repeats about once per second from 2024-11-14T06:48:00,342 through 2024-11-14T06:48:04,344, alternating between the WAL files 20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta and 20680646cf8a%2C37019%2C1731566754231.1731566754449; every repetition carries the identical stack trace, ending in] ... 
11 more 2024-11-14T06:48:04,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34767 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-14T06:48:04,545 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-14T06:48:04,551 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C45511%2C1731566833269.1731566884551 2024-11-14T06:48:04,560 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:04,560 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:04,560 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:04,560 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:04,560 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:04,560 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/WALs/20680646cf8a,45511,1731566833269/20680646cf8a%2C45511%2C1731566833269.1731566874485 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/WALs/20680646cf8a,45511,1731566833269/20680646cf8a%2C45511%2C1731566833269.1731566884551 2024-11-14T06:48:04,561 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33883:33883),(127.0.0.1/127.0.0.1:33507:33507)] 2024-11-14T06:48:04,561 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/WALs/20680646cf8a,45511,1731566833269/20680646cf8a%2C45511%2C1731566833269.1731566874485 is not closed yet, will try archiving it next time 2024-11-14T06:48:04,561 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/WALs/20680646cf8a,45511,1731566833269/20680646cf8a%2C45511%2C1731566833269.1731566864373 to hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/oldWALs/20680646cf8a%2C45511%2C1731566833269.1731566864373 2024-11-14T06:48:04,561 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T06:48:04,561 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
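For context on the repeated lease-recovery warnings above: the stack traces show RecoverLeaseFSUtils invoking DistributedFileSystem.isFileClosed reflectively and retrying after each InvocationTargetException, and every attempt fails because the underlying DFSClient has already been closed. A minimal, hypothetical sketch of that probe-and-retry pattern follows; the class and method names here, and the error handling, are assumptions inferred from the log, not the actual HBase implementation.

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical sketch of the retry loop implied by the repeated
// "Failed invocation ... Filesystem closed" warnings above.
final class LeaseRecoveryProbe {
  static boolean waitUntilFileClosed(FileSystem fs, Path wal, long timeoutMs)
      throws InterruptedException {
    Method isFileClosed;
    try {
      // isFileClosed(Path) exists only on DistributedFileSystem, not on the
      // FileSystem base class, hence the reflective lookup seen in the traces.
      isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // not HDFS, nothing to probe
    }
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        if ((Boolean) isFileClosed.invoke(fs, wal)) {
          return true;
        }
      } catch (InvocationTargetException | IllegalAccessException e) {
        // With a closed DFSClient the cause is IOException("Filesystem closed"),
        // which is what every WARN line above keeps reporting.
        System.err.println("Failed invocation for " + wal + ": " + e.getCause());
      }
      Thread.sleep(1000L); // matches the roughly one-second spacing of the warnings
    }
    return false;
  }
}
```

The reflective call is presumably there for compatibility with Hadoop versions whose FileSystem does not expose isFileClosed; here it simply keeps failing because the test has already torn down the DFS client.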
2024-11-14T06:48:04,561 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T06:48:04,561 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:48:04,561 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:48:04,561 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
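The call stack logged just above comes from the test teardown path visible in it: AbstractTestLogRolling.tearDown calls HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection and then stops the mini cluster. A rough, hypothetical sketch of such a teardown is shown below; apart from the HBaseTestingUtil.shutdownMiniCluster call that appears in the stack trace, the names and structure are assumptions.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

// Hypothetical sketch of the JUnit teardown that produces the call stacks above.
public abstract class AbstractTestLogRollingSketch {
  protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Closes the cluster connection first (AsyncConnectionImpl.close in the
    // stack), then stops the master, region servers, and mini DFS.
    TEST_UTIL.shutdownMiniCluster();
  }
}
```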
2024-11-14T06:48:04,562 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T06:48:04,562 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1971934903, stopped=false 2024-11-14T06:48:04,562 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=20680646cf8a,34767,1731566833231 2024-11-14T06:48:04,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741842_1018 (size=2026) 2024-11-14T06:48:04,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741842_1018 (size=2026) 2024-11-14T06:48:04,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45511-0x1003cfc6aac0001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T06:48:04,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45511-0x1003cfc6aac0001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:04,565 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T06:48:04,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T06:48:04,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:04,566 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
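The ZKWatcher lines above record how the shutdown is broadcast: deleting the /hbase/running znode fires a NodeDeleted event at every watcher, and each server then re-sets a watch on the now-missing znode while it begins stopping. A small, hypothetical sketch of watching that znode with the plain ZooKeeper client follows; the class name and the handling inside process() are illustrative only.

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Hypothetical sketch of the /hbase/running shutdown signal seen above.
final class RunningZNodeWatch {
  static void watchClusterState(ZooKeeper zk) throws Exception {
    Watcher watcher = new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        if (event.getType() == Watcher.Event.EventType.NodeDeleted
            && "/hbase/running".equals(event.getPath())) {
          System.out.println("Cluster shutdown requested");
        }
      }
    };
    // exists() registers a one-shot watch whether or not the znode is present,
    // which is why the servers can re-watch a znode that no longer exists.
    zk.exists("/hbase/running", watcher);
  }
}
```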
2024-11-14T06:48:04,566 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T06:48:04,566 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:48:04,566 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:45511-0x1003cfc6aac0001, quorum=127.0.0.1:52340, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:48:04,566 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '20680646cf8a,45511,1731566833269' ***** 2024-11-14T06:48:04,566 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T06:48:04,566 INFO [RS:0;20680646cf8a:45511 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T06:48:04,566 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T06:48:04,566 INFO [RS:0;20680646cf8a:45511 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T06:48:04,566 INFO [RS:0;20680646cf8a:45511 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T06:48:04,566 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:48:04,566 INFO [RS:0;20680646cf8a:45511 {}] regionserver.HRegionServer(3091): Received CLOSE for 166f46e0191977f403f01f6b307e4b9c 2024-11-14T06:48:04,568 INFO [RS:0;20680646cf8a:45511 {}] regionserver.HRegionServer(959): stopping server 20680646cf8a,45511,1731566833269 2024-11-14T06:48:04,568 INFO [RS:0;20680646cf8a:45511 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T06:48:04,568 INFO [RS:0;20680646cf8a:45511 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;20680646cf8a:45511. 2024-11-14T06:48:04,568 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 166f46e0191977f403f01f6b307e4b9c, disabling compactions & flushes 2024-11-14T06:48:04,568 DEBUG [RS:0;20680646cf8a:45511 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T06:48:04,568 DEBUG [RS:0;20680646cf8a:45511 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:48:04,568 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c. 2024-11-14T06:48:04,568 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c. 2024-11-14T06:48:04,568 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c. after waiting 0 ms 2024-11-14T06:48:04,568 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c. 2024-11-14T06:48:04,568 INFO [RS:0;20680646cf8a:45511 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T06:48:04,568 INFO [RS:0;20680646cf8a:45511 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T06:48:04,568 INFO [RS:0;20680646cf8a:45511 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-14T06:48:04,568 INFO [RS:0;20680646cf8a:45511 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T06:48:04,568 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 166f46e0191977f403f01f6b307e4b9c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-14T06:48:04,570 INFO [RS:0;20680646cf8a:45511 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-14T06:48:04,570 DEBUG [RS:0;20680646cf8a:45511 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 166f46e0191977f403f01f6b307e4b9c=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c.} 2024-11-14T06:48:04,570 DEBUG [RS:0;20680646cf8a:45511 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 166f46e0191977f403f01f6b307e4b9c 2024-11-14T06:48:04,570 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T06:48:04,570 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T06:48:04,570 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T06:48:04,570 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T06:48:04,570 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T06:48:04,570 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-14T06:48:04,573 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 
{event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/.tmp/info/7244791a9d5f41fd906cf4c1f4740722 is 1080, key is row0001/info:/1731566884549/Put/seqid=0 2024-11-14T06:48:04,580 INFO [regionserver/20680646cf8a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-14T06:48:04,581 INFO [regionserver/20680646cf8a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-14T06:48:04,587 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/hbase/meta/1588230740/.tmp/info/916cfae5b5334caea3a4e7cd31374c82 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c./info:regioninfo/1731566834473/Put/seqid=0 2024-11-14T06:48:04,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741845_1021 (size=6033) 2024-11-14T06:48:04,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741845_1021 (size=6033) 2024-11-14T06:48:04,595 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/.tmp/info/7244791a9d5f41fd906cf4c1f4740722 2024-11-14T06:48:04,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741846_1022 (size=7308) 2024-11-14T06:48:04,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741846_1022 (size=7308) 2024-11-14T06:48:04,599 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/hbase/meta/1588230740/.tmp/info/916cfae5b5334caea3a4e7cd31374c82 2024-11-14T06:48:04,603 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/.tmp/info/7244791a9d5f41fd906cf4c1f4740722 as hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/info/7244791a9d5f41fd906cf4c1f4740722 2024-11-14T06:48:04,610 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/info/7244791a9d5f41fd906cf4c1f4740722, entries=1, sequenceid=22, filesize=5.9 K 2024-11-14T06:48:04,611 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 166f46e0191977f403f01f6b307e4b9c in 43ms, sequenceid=22, compaction requested=true 2024-11-14T06:48:04,612 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/info/76a4cd729b4f425e97756bb3b955729a, hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/info/c77fb701772847d3a37350905227497a, hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/info/6423ce53d7fa432fa4b9bd3fd9ceb7e4] to archive 2024-11-14T06:48:04,613 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-14T06:48:04,615 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/info/76a4cd729b4f425e97756bb3b955729a to hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/info/76a4cd729b4f425e97756bb3b955729a 2024-11-14T06:48:04,617 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/info/c77fb701772847d3a37350905227497a to hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/info/c77fb701772847d3a37350905227497a 2024-11-14T06:48:04,618 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/info/6423ce53d7fa432fa4b9bd3fd9ceb7e4 to 
hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/info/6423ce53d7fa432fa4b9bd3fd9ceb7e4 2024-11-14T06:48:04,619 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=20680646cf8a:34767 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] 
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-14T06:48:04,619 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [76a4cd729b4f425e97756bb3b955729a=6033, c77fb701772847d3a37350905227497a=6033, 6423ce53d7fa432fa4b9bd3fd9ceb7e4=6033] 2024-11-14T06:48:04,626 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/hbase/meta/1588230740/.tmp/ns/ba6d5ab169784d27a072c809cb47bb7f is 43, key is default/ns:d/1731566834063/Put/seqid=0 2024-11-14T06:48:04,640 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/166f46e0191977f403f01f6b307e4b9c/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-14T06:48:04,640 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c. 2024-11-14T06:48:04,640 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 166f46e0191977f403f01f6b307e4b9c: Waiting for close lock at 1731566884568Running coprocessor pre-close hooks at 1731566884568Disabling compacts and flushes for region at 1731566884568Disabling writes for close at 1731566884568Obtaining lock to block concurrent updates at 1731566884568Preparing flush snapshotting stores in 166f46e0191977f403f01f6b307e4b9c at 1731566884568Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731566884568Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c. 
at 1731566884569 (+1 ms)Flushing 166f46e0191977f403f01f6b307e4b9c/info: creating writer at 1731566884569Flushing 166f46e0191977f403f01f6b307e4b9c/info: appending metadata at 1731566884572 (+3 ms)Flushing 166f46e0191977f403f01f6b307e4b9c/info: closing flushed file at 1731566884572Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@55873528: reopening flushed file at 1731566884602 (+30 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 166f46e0191977f403f01f6b307e4b9c in 43ms, sequenceid=22, compaction requested=true at 1731566884611 (+9 ms)Writing region close event to WAL at 1731566884620 (+9 ms)Running coprocessor post-close hooks at 1731566884640 (+20 ms)Closed at 1731566884640 2024-11-14T06:48:04,641 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731566834100.166f46e0191977f403f01f6b307e4b9c. 2024-11-14T06:48:04,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741847_1023 (size=5153) 2024-11-14T06:48:04,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741847_1023 (size=5153) 2024-11-14T06:48:04,649 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/hbase/meta/1588230740/.tmp/ns/ba6d5ab169784d27a072c809cb47bb7f 2024-11-14T06:48:04,676 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/hbase/meta/1588230740/.tmp/table/3955af89057a4caf8a7812f07ad060e0 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731566834483/Put/seqid=0 2024-11-14T06:48:04,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741848_1024 (size=5508) 2024-11-14T06:48:04,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741848_1024 (size=5508) 2024-11-14T06:48:04,687 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/hbase/meta/1588230740/.tmp/table/3955af89057a4caf8a7812f07ad060e0 2024-11-14T06:48:04,695 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/hbase/meta/1588230740/.tmp/info/916cfae5b5334caea3a4e7cd31374c82 as hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/hbase/meta/1588230740/info/916cfae5b5334caea3a4e7cd31374c82 2024-11-14T06:48:04,705 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/hbase/meta/1588230740/info/916cfae5b5334caea3a4e7cd31374c82, entries=10, sequenceid=11, filesize=7.1 K 2024-11-14T06:48:04,706 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/hbase/meta/1588230740/.tmp/ns/ba6d5ab169784d27a072c809cb47bb7f as hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/hbase/meta/1588230740/ns/ba6d5ab169784d27a072c809cb47bb7f 2024-11-14T06:48:04,714 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/hbase/meta/1588230740/ns/ba6d5ab169784d27a072c809cb47bb7f, entries=2, sequenceid=11, filesize=5.0 K 2024-11-14T06:48:04,715 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/hbase/meta/1588230740/.tmp/table/3955af89057a4caf8a7812f07ad060e0 as hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/hbase/meta/1588230740/table/3955af89057a4caf8a7812f07ad060e0 2024-11-14T06:48:04,729 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/hbase/meta/1588230740/table/3955af89057a4caf8a7812f07ad060e0, entries=2, sequenceid=11, filesize=5.4 K 2024-11-14T06:48:04,730 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 160ms, sequenceid=11, compaction requested=false 2024-11-14T06:48:04,753 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-14T06:48:04,754 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T06:48:04,754 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T06:48:04,754 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731566884570Running coprocessor pre-close hooks at 1731566884570Disabling compacts and flushes for region at 1731566884570Disabling writes for close at 1731566884570Obtaining lock to block concurrent updates at 1731566884570Preparing flush snapshotting stores in 1588230740 at 1731566884570Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731566884571 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731566884571Flushing 1588230740/info: creating writer at 1731566884572 
(+1 ms)Flushing 1588230740/info: appending metadata at 1731566884587 (+15 ms)Flushing 1588230740/info: closing flushed file at 1731566884587Flushing 1588230740/ns: creating writer at 1731566884605 (+18 ms)Flushing 1588230740/ns: appending metadata at 1731566884626 (+21 ms)Flushing 1588230740/ns: closing flushed file at 1731566884626Flushing 1588230740/table: creating writer at 1731566884656 (+30 ms)Flushing 1588230740/table: appending metadata at 1731566884675 (+19 ms)Flushing 1588230740/table: closing flushed file at 1731566884675Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a7dc2c4: reopening flushed file at 1731566884694 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@440e38b: reopening flushed file at 1731566884705 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34b2605f: reopening flushed file at 1731566884714 (+9 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 160ms, sequenceid=11, compaction requested=false at 1731566884730 (+16 ms)Writing region close event to WAL at 1731566884750 (+20 ms)Running coprocessor post-close hooks at 1731566884754 (+4 ms)Closed at 1731566884754 2024-11-14T06:48:04,754 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T06:48:04,770 INFO [RS:0;20680646cf8a:45511 {}] regionserver.HRegionServer(976): stopping server 20680646cf8a,45511,1731566833269; all regions closed. 2024-11-14T06:48:04,789 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:04,789 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:04,789 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:04,789 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:04,790 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:04,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741834_1010 (size=3306) 2024-11-14T06:48:04,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741834_1010 (size=3306) 2024-11-14T06:48:04,794 DEBUG [RS:0;20680646cf8a:45511 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/oldWALs 2024-11-14T06:48:04,794 INFO [RS:0;20680646cf8a:45511 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 20680646cf8a%2C45511%2C1731566833269.meta:.meta(num 1731566834026) 2024-11-14T06:48:04,795 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:04,795 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:04,795 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:04,795 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:04,795 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:04,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741844_1020 (size=1252) 2024-11-14T06:48:04,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741844_1020 (size=1252) 2024-11-14T06:48:04,963 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/WALs/20680646cf8a,45511,1731566833269/20680646cf8a%2C45511%2C1731566833269.1731566874485 to hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/oldWALs/20680646cf8a%2C45511%2C1731566833269.1731566874485 2024-11-14T06:48:04,966 DEBUG [RS:0;20680646cf8a:45511 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/oldWALs 2024-11-14T06:48:04,966 INFO [RS:0;20680646cf8a:45511 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 20680646cf8a%2C45511%2C1731566833269:(num 1731566884551) 2024-11-14T06:48:04,966 DEBUG [RS:0;20680646cf8a:45511 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:48:04,966 INFO [RS:0;20680646cf8a:45511 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T06:48:04,966 INFO [RS:0;20680646cf8a:45511 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T06:48:04,966 INFO [RS:0;20680646cf8a:45511 {}] hbase.ChoreService(370): Chore service for: regionserver/20680646cf8a:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-14T06:48:04,966 INFO [RS:0;20680646cf8a:45511 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T06:48:04,967 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T06:48:04,967 INFO [RS:0;20680646cf8a:45511 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45511 2024-11-14T06:48:04,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T06:48:04,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45511-0x1003cfc6aac0001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/20680646cf8a,45511,1731566833269 2024-11-14T06:48:04,968 INFO [RS:0;20680646cf8a:45511 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T06:48:04,969 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [20680646cf8a,45511,1731566833269] 2024-11-14T06:48:04,970 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/20680646cf8a,45511,1731566833269 already deleted, retry=false 2024-11-14T06:48:04,970 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 20680646cf8a,45511,1731566833269 expired; onlineServers=0 2024-11-14T06:48:04,970 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '20680646cf8a,34767,1731566833231' ***** 2024-11-14T06:48:04,970 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T06:48:04,970 INFO [M:0;20680646cf8a:34767 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T06:48:04,970 INFO [M:0;20680646cf8a:34767 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T06:48:04,970 DEBUG [M:0;20680646cf8a:34767 {}] cleaner.LogCleaner(198): 
Cancelling LogCleaner 2024-11-14T06:48:04,970 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-14T06:48:04,970 DEBUG [M:0;20680646cf8a:34767 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T06:48:04,970 DEBUG [master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.large.0-1731566833415 {}] cleaner.HFileCleaner(306): Exit Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.large.0-1731566833415,5,FailOnTimeoutGroup] 2024-11-14T06:48:04,970 DEBUG [master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.small.0-1731566833415 {}] cleaner.HFileCleaner(306): Exit Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.small.0-1731566833415,5,FailOnTimeoutGroup] 2024-11-14T06:48:04,970 INFO [M:0;20680646cf8a:34767 {}] hbase.ChoreService(370): Chore service for: master/20680646cf8a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T06:48:04,970 INFO [M:0;20680646cf8a:34767 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T06:48:04,970 DEBUG [M:0;20680646cf8a:34767 {}] master.HMaster(1795): Stopping service threads 2024-11-14T06:48:04,970 INFO [M:0;20680646cf8a:34767 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T06:48:04,970 INFO [M:0;20680646cf8a:34767 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T06:48:04,971 INFO [M:0;20680646cf8a:34767 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T06:48:04,971 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T06:48:04,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T06:48:04,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:04,972 DEBUG [M:0;20680646cf8a:34767 {}] zookeeper.ZKUtil(347): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T06:48:04,972 WARN [M:0;20680646cf8a:34767 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T06:48:04,972 INFO [M:0;20680646cf8a:34767 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/.lastflushedseqids 2024-11-14T06:48:04,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741849_1025 (size=130) 2024-11-14T06:48:04,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741849_1025 (size=130) 2024-11-14T06:48:04,987 INFO [M:0;20680646cf8a:34767 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T06:48:04,987 INFO [M:0;20680646cf8a:34767 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T06:48:04,987 DEBUG [M:0;20680646cf8a:34767 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T06:48:04,987 INFO [M:0;20680646cf8a:34767 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:48:04,987 DEBUG [M:0;20680646cf8a:34767 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:48:04,987 DEBUG [M:0;20680646cf8a:34767 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T06:48:04,987 DEBUG [M:0;20680646cf8a:34767 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:48:04,987 INFO [M:0;20680646cf8a:34767 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.60 KB heapSize=55.01 KB 2024-11-14T06:48:05,010 DEBUG [M:0;20680646cf8a:34767 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bdf0e29ddf3f49fcb951121d161483f1 is 82, key is hbase:meta,,1/info:regioninfo/1731566834051/Put/seqid=0 2024-11-14T06:48:05,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741850_1026 (size=5672) 2024-11-14T06:48:05,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741850_1026 (size=5672) 2024-11-14T06:48:05,016 INFO [M:0;20680646cf8a:34767 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bdf0e29ddf3f49fcb951121d161483f1 2024-11-14T06:48:05,037 DEBUG [M:0;20680646cf8a:34767 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e62263c88e3647fbaeafc119932c3327 is 799, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731566834488/Put/seqid=0 2024-11-14T06:48:05,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741851_1027 (size=7824) 2024-11-14T06:48:05,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741851_1027 (size=7824) 2024-11-14T06:48:05,043 INFO [M:0;20680646cf8a:34767 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.00 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e62263c88e3647fbaeafc119932c3327 2024-11-14T06:48:05,049 INFO [M:0;20680646cf8a:34767 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e62263c88e3647fbaeafc119932c3327 2024-11-14T06:48:05,063 DEBUG [M:0;20680646cf8a:34767 {}] hfile.HFileWriterImpl(814): 
Len of the biggest cell in hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/62fd11fb20e145029e1465f5d499071d is 69, key is 20680646cf8a,45511,1731566833269/rs:state/1731566833502/Put/seqid=0 2024-11-14T06:48:05,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741852_1028 (size=5156) 2024-11-14T06:48:05,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741852_1028 (size=5156) 2024-11-14T06:48:05,069 INFO [RS:0;20680646cf8a:45511 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T06:48:05,069 INFO [RS:0;20680646cf8a:45511 {}] regionserver.HRegionServer(1031): Exiting; stopping=20680646cf8a,45511,1731566833269; zookeeper connection closed. 2024-11-14T06:48:05,069 INFO [M:0;20680646cf8a:34767 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/62fd11fb20e145029e1465f5d499071d 2024-11-14T06:48:05,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45511-0x1003cfc6aac0001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:48:05,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45511-0x1003cfc6aac0001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:48:05,070 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@305d9ac5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@305d9ac5 2024-11-14T06:48:05,070 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T06:48:05,100 DEBUG [M:0;20680646cf8a:34767 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/eee3d5d577f04343b83b8a4eca93174c is 52, key is load_balancer_on/state:d/1731566834096/Put/seqid=0 2024-11-14T06:48:05,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741853_1029 (size=5056) 2024-11-14T06:48:05,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741853_1029 (size=5056) 2024-11-14T06:48:05,107 INFO [M:0;20680646cf8a:34767 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/eee3d5d577f04343b83b8a4eca93174c 2024-11-14T06:48:05,113 DEBUG [M:0;20680646cf8a:34767 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bdf0e29ddf3f49fcb951121d161483f1 as 
hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/bdf0e29ddf3f49fcb951121d161483f1 2024-11-14T06:48:05,120 INFO [M:0;20680646cf8a:34767 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/bdf0e29ddf3f49fcb951121d161483f1, entries=8, sequenceid=121, filesize=5.5 K 2024-11-14T06:48:05,122 DEBUG [M:0;20680646cf8a:34767 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e62263c88e3647fbaeafc119932c3327 as hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e62263c88e3647fbaeafc119932c3327 2024-11-14T06:48:05,127 INFO [M:0;20680646cf8a:34767 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e62263c88e3647fbaeafc119932c3327 2024-11-14T06:48:05,127 INFO [M:0;20680646cf8a:34767 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e62263c88e3647fbaeafc119932c3327, entries=14, sequenceid=121, filesize=7.6 K 2024-11-14T06:48:05,128 DEBUG [M:0;20680646cf8a:34767 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/62fd11fb20e145029e1465f5d499071d as hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/62fd11fb20e145029e1465f5d499071d 2024-11-14T06:48:05,135 INFO [M:0;20680646cf8a:34767 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/62fd11fb20e145029e1465f5d499071d, entries=1, sequenceid=121, filesize=5.0 K 2024-11-14T06:48:05,136 DEBUG [M:0;20680646cf8a:34767 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/eee3d5d577f04343b83b8a4eca93174c as hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/eee3d5d577f04343b83b8a4eca93174c 2024-11-14T06:48:05,142 INFO [M:0;20680646cf8a:34767 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37505/user/jenkins/test-data/d6a54cd0-e56d-50e7-5252-0ae6af17aeaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/eee3d5d577f04343b83b8a4eca93174c, entries=1, sequenceid=121, filesize=4.9 K 2024-11-14T06:48:05,143 INFO [M:0;20680646cf8a:34767 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.60 KB/44650, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 156ms, sequenceid=121, compaction requested=false 2024-11-14T06:48:05,146 INFO [M:0;20680646cf8a:34767 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
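The HRegionFileSystem(442) "Committing ... .tmp/<file> as ..." entries above record the last step of a flush: the store file is first written under the region's .tmp directory and then renamed into the column-family directory. Below is a minimal, illustrative Java sketch of that write-then-rename commit step using only the public Hadoop FileSystem API; the class name, helper method and paths are assumptions for the example, not HBase's internal implementation.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;

// Illustrative only: "commit" a flushed store file by renaming it out of the
// region's .tmp directory into the column-family directory, mirroring the
// "Committing .../.tmp/<file> as .../<family>/<file>" DEBUG lines above.
public final class TmpCommitSketch {
  static Path commitFlushedFile(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
    Path dst = new Path(familyDir, tmpFile.getName());
    if (!fs.rename(tmpFile, dst)) {           // the rename is the commit; the data was already written to .tmp
      throw new IOException("Failed to commit " + tmpFile + " as " + dst);
    }
    return dst;
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    // Example paths modeled on the log above; adjust to a real filesystem before running.
    Path tmp = new Path("/data/hbase/meta/1588230740/.tmp/ns/ba6d5ab169784d27a072c809cb47bb7f");
    Path familyDir = new Path("/data/hbase/meta/1588230740/ns");
    System.out.println("Committed to " + commitFlushedFile(fs, tmp, familyDir));
  }
}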
2024-11-14T06:48:05,146 DEBUG [M:0;20680646cf8a:34767 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731566884987Disabling compacts and flushes for region at 1731566884987Disabling writes for close at 1731566884987Obtaining lock to block concurrent updates at 1731566884987Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731566884987Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44650, getHeapSize=56264, getOffHeapSize=0, getCellsCount=140 at 1731566884988 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731566884988Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731566884988Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731566885009 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731566885009Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731566885021 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731566885036 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731566885036Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731566885049 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731566885063 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731566885063Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731566885075 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731566885099 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731566885099Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@12038584: reopening flushed file at 1731566885112 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1ba11e23: reopening flushed file at 1731566885121 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3da1fc2d: reopening flushed file at 1731566885127 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@288fe163: reopening flushed file at 1731566885135 (+8 ms)Finished flush of dataSize ~43.60 KB/44650, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 156ms, sequenceid=121, compaction requested=false at 1731566885143 (+8 ms)Writing region close event to WAL at 1731566885146 (+3 ms)Closed at 1731566885146 2024-11-14T06:48:05,147 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:05,147 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:05,147 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:05,147 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:05,147 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:05,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34469 is added to blk_1073741830_1006 (size=53047) 2024-11-14T06:48:05,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40215 is added to blk_1073741830_1006 (size=53047) 2024-11-14T06:48:05,152 INFO [M:0;20680646cf8a:34767 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
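The "Region close journal" entry above packs the whole close timeline into a single line, each step stamped with an epoch-millisecond time and a (+N ms) delta from the previous step. The small parser below is an illustrative way to split such a journal back into readable step/timestamp pairs; the regex, class name and the shortened sample string are assumptions made for this example and are not part of HBase.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Illustrative helper: split a concatenated "Region close journal" string into
// its steps, the 13-digit epoch-millisecond timestamp, and the optional (+N ms) delta.
public final class CloseJournalParser {
  private static final Pattern STEP = Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+(\\d+) ms\\))?");

  public static void main(String[] args) {
    // Abbreviated sample taken from the journal line above.
    String journal = "Waiting for close lock at 1731566884987"
        + "Disabling compacts and flushes for region at 1731566884987"
        + "Writing region close event to WAL at 1731566885146 (+3 ms)"
        + "Closed at 1731566885146";
    Matcher m = STEP.matcher(journal);
    while (m.find()) {
      String delta = m.group(3) == null ? "0" : m.group(3);
      System.out.printf("%-50s t=%s (+%s ms)%n", m.group(1).trim(), m.group(2), delta);
    }
  }
}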
2024-11-14T06:48:05,152 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T06:48:05,152 INFO [M:0;20680646cf8a:34767 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34767 2024-11-14T06:48:05,152 INFO [M:0;20680646cf8a:34767 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T06:48:05,253 INFO [M:0;20680646cf8a:34767 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T06:48:05,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:48:05,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34767-0x1003cfc6aac0000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:48:05,257 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@73476a0a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:48:05,257 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@35023063{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:48:05,257 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:48:05,257 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@206f042f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:48:05,258 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ed77c81{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/hadoop.log.dir/,STOPPED} 2024-11-14T06:48:05,260 WARN [BP-1653618985-172.17.0.2-1731566832670 heartbeating to localhost/127.0.0.1:37505 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T06:48:05,260 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
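The ZKWatcher entries above (the NodeDeleted event on /hbase/rs/<server>, the RegionServerTracker expiring that server, and the final state=Closed events as the master closes its session) follow the standard ZooKeeper ephemeral-node pattern: a server registers an ephemeral znode, the znode disappears automatically when its session ends, and watchers are notified. The sketch below shows that pattern with the plain Apache ZooKeeper client API; the connection string, znode path and class name are assumptions for the example, not the test's actual configuration.

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

// Illustrative only: register an ephemeral znode and print watcher events,
// in the spirit of the ZKWatcher / RegionServerTracker lines above.
public final class EphemeralNodeSketch {
  public static void main(String[] args) throws Exception {
    String quorum = "127.0.0.1:2181";   // assumption: a local ZooKeeper (the test above used 127.0.0.1:52340)
    ZooKeeper zk = new ZooKeeper(quorum, 30_000,
        (WatchedEvent e) -> System.out.println("event type=" + e.getType()
            + " state=" + e.getState() + " path=" + e.getPath()));
    // Register an ephemeral znode, as a region server does under /hbase/rs.
    String path = zk.create("/demo-rs", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    System.out.println("registered " + path);
    // Any other client watching this path (e.g. via exists(path, true)) receives a
    // NodeDeleted event once this session closes, because ephemeral nodes are removed
    // automatically with their owning session.
    zk.close();
  }
}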
2024-11-14T06:48:05,260 WARN [BP-1653618985-172.17.0.2-1731566832670 heartbeating to localhost/127.0.0.1:37505 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1653618985-172.17.0.2-1731566832670 (Datanode Uuid 5187d187-9484-4741-9e8f-5c147f7cf165) service to localhost/127.0.0.1:37505 2024-11-14T06:48:05,260 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T06:48:05,261 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/cluster_96f36b60-77a4-0898-83c2-780ff865c39a/data/data3/current/BP-1653618985-172.17.0.2-1731566832670 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:48:05,261 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/cluster_96f36b60-77a4-0898-83c2-780ff865c39a/data/data4/current/BP-1653618985-172.17.0.2-1731566832670 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:48:05,262 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T06:48:05,265 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1d6dee42{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:48:05,266 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3df88721{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:48:05,266 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:48:05,266 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1fa04e54{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:48:05,266 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@168edd0a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/hadoop.log.dir/,STOPPED} 2024-11-14T06:48:05,269 WARN [BP-1653618985-172.17.0.2-1731566832670 heartbeating to localhost/127.0.0.1:37505 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T06:48:05,269 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
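The ResourceChecker summary that follows ("Thread=206 (was 179) ... Potentially hanging thread: ...") comes from comparing the set of live threads before and after the test and dumping the stacks of any survivors. A minimal, illustrative version of that before/after diff is sketched below using only JDK APIs; it is not HBase's ResourceChecker, and the "demo-leaked-thread" is invented for the example.

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Illustrative only: snapshot live thread IDs before a "test", then report any
// new threads still alive afterwards, similar to the ResourceChecker output below.
public final class ThreadLeakSketch {
  public static void main(String[] args) {
    Set<Long> before = liveThreadIds();

    // Stands in for the test body: start a thread that outlives the check.
    Thread leaked = new Thread(() -> {
      try { Thread.sleep(60_000); } catch (InterruptedException ignored) { }
    }, "demo-leaked-thread");
    leaked.setDaemon(true);
    leaked.start();

    for (Map.Entry<Thread, StackTraceElement[]> e : Thread.getAllStackTraces().entrySet()) {
      Thread t = e.getKey();
      if (t.isAlive() && !before.contains(t.getId())) {
        System.out.println("Potentially hanging thread: " + t.getName());
        for (StackTraceElement frame : e.getValue()) {
          System.out.println("    " + frame);
        }
      }
    }
  }

  private static Set<Long> liveThreadIds() {
    Set<Long> ids = new HashSet<>();
    for (Thread t : Thread.getAllStackTraces().keySet()) {
      ids.add(t.getId());
    }
    return ids;
  }
}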
2024-11-14T06:48:05,269 WARN [BP-1653618985-172.17.0.2-1731566832670 heartbeating to localhost/127.0.0.1:37505 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1653618985-172.17.0.2-1731566832670 (Datanode Uuid 29635134-ca14-4c5e-b4f1-22ea7f2be238) service to localhost/127.0.0.1:37505 2024-11-14T06:48:05,269 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T06:48:05,273 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/cluster_96f36b60-77a4-0898-83c2-780ff865c39a/data/data1/current/BP-1653618985-172.17.0.2-1731566832670 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:48:05,274 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/cluster_96f36b60-77a4-0898-83c2-780ff865c39a/data/data2/current/BP-1653618985-172.17.0.2-1731566832670 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:48:05,274 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T06:48:05,283 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4e2e10dc{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T06:48:05,283 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3cb009a1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:48:05,283 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:48:05,283 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16208fe2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:48:05,283 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ece2bc8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/hadoop.log.dir/,STOPPED} 2024-11-14T06:48:05,293 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T06:48:05,322 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T06:48:05,333 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=206 (was 179) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37505 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37505 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/20680646cf8a:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37505 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37505 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:37505 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37505 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37505 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37505 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37505 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 455) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=52 (was 80), ProcessCount=11 (was 11), AvailableMemoryMB=1090 (was 260) - AvailableMemoryMB LEAK? - 2024-11-14T06:48:05,343 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=206, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=52, ProcessCount=11, AvailableMemoryMB=1089 2024-11-14T06:48:05,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T06:48:05,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/hadoop.log.dir so I do NOT create it in target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a 2024-11-14T06:48:05,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45d68592-c015-287b-353c-c1c74f818e22/hadoop.tmp.dir so I do NOT create it in target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a 2024-11-14T06:48:05,343 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/cluster_66668d32-f255-2691-2a87-36d2a9a81533, deleteOnExit=true 2024-11-14T06:48:05,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T06:48:05,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/test.cache.data in system properties and HBase conf 2024-11-14T06:48:05,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T06:48:05,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/hadoop.log.dir in system properties and HBase conf 2024-11-14T06:48:05,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T06:48:05,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T06:48:05,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T06:48:05,344 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-14T06:48:05,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T06:48:05,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T06:48:05,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T06:48:05,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T06:48:05,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T06:48:05,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T06:48:05,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T06:48:05,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T06:48:05,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T06:48:05,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/nfs.dump.dir in system properties and HBase conf 2024-11-14T06:48:05,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:48:05,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/java.io.tmpdir in system properties and HBase conf 2024-11-14T06:48:05,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T06:48:05,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T06:48:05,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:48:05,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T06:48:05,364 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T06:48:05,417 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:48:05,422 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:48:05,424 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:48:05,424 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:48:05,424 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T06:48:05,425 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:48:05,426 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1fda4535{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:48:05,426 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b767eb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:48:05,517 INFO [regionserver/20680646cf8a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T06:48:05,522 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4bdca924{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/java.io.tmpdir/jetty-localhost-36467-hadoop-hdfs-3_4_1-tests_jar-_-any-7740194560380417958/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T06:48:05,523 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@27fb1a0a{HTTP/1.1, (http/1.1)}{localhost:36467} 2024-11-14T06:48:05,523 INFO [Time-limited test {}] server.Server(415): Started @237120ms 2024-11-14T06:48:05,537 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T06:48:05,594 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:48:05,596 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:48:05,597 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:48:05,597 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:48:05,597 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T06:48:05,598 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f8f17a1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:48:05,598 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a5ea7cd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:48:05,690 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@45bda0cb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/java.io.tmpdir/jetty-localhost-46071-hadoop-hdfs-3_4_1-tests_jar-_-any-15847259463742888086/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:48:05,690 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@79864455{HTTP/1.1, (http/1.1)}{localhost:46071} 2024-11-14T06:48:05,690 INFO [Time-limited test {}] server.Server(415): Started @237287ms 2024-11-14T06:48:05,691 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T06:48:05,717 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:48:05,719 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:48:05,721 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:48:05,721 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:48:05,721 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T06:48:05,722 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2735da07{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:48:05,722 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2de80e16{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:48:05,746 WARN [Thread-1964 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/cluster_66668d32-f255-2691-2a87-36d2a9a81533/data/data2/current/BP-2010868496-172.17.0.2-1731566885368/current, will proceed with Du for space computation calculation, 2024-11-14T06:48:05,746 WARN [Thread-1963 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/cluster_66668d32-f255-2691-2a87-36d2a9a81533/data/data1/current/BP-2010868496-172.17.0.2-1731566885368/current, will proceed with Du for space computation calculation, 2024-11-14T06:48:05,765 WARN [Thread-1942 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T06:48:05,767 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbf308fe7bb6e2bd2 with lease ID 0x24374bb12259e7a7: Processing first storage report for DS-a815e2d6-aad6-4a29-a170-a418d22dc35a from datanode DatanodeRegistration(127.0.0.1:41929, datanodeUuid=b6813ce8-6b2a-45b1-b7c6-5468599a678c, infoPort=42521, infoSecurePort=0, ipcPort=35291, storageInfo=lv=-57;cid=testClusterID;nsid=2062371966;c=1731566885368) 2024-11-14T06:48:05,767 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbf308fe7bb6e2bd2 with lease ID 0x24374bb12259e7a7: from storage DS-a815e2d6-aad6-4a29-a170-a418d22dc35a node DatanodeRegistration(127.0.0.1:41929, datanodeUuid=b6813ce8-6b2a-45b1-b7c6-5468599a678c, infoPort=42521, infoSecurePort=0, ipcPort=35291, storageInfo=lv=-57;cid=testClusterID;nsid=2062371966;c=1731566885368), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:48:05,767 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbf308fe7bb6e2bd2 with lease ID 0x24374bb12259e7a7: Processing first storage report for DS-e3ac4504-5585-4540-8bd5-3a2e6d70181f from datanode DatanodeRegistration(127.0.0.1:41929, datanodeUuid=b6813ce8-6b2a-45b1-b7c6-5468599a678c, infoPort=42521, infoSecurePort=0, ipcPort=35291, storageInfo=lv=-57;cid=testClusterID;nsid=2062371966;c=1731566885368) 2024-11-14T06:48:05,767 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbf308fe7bb6e2bd2 with lease ID 0x24374bb12259e7a7: from storage DS-e3ac4504-5585-4540-8bd5-3a2e6d70181f node DatanodeRegistration(127.0.0.1:41929, datanodeUuid=b6813ce8-6b2a-45b1-b7c6-5468599a678c, infoPort=42521, infoSecurePort=0, ipcPort=35291, storageInfo=lv=-57;cid=testClusterID;nsid=2062371966;c=1731566885368), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:48:05,816 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@550f8afc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/java.io.tmpdir/jetty-localhost-34643-hadoop-hdfs-3_4_1-tests_jar-_-any-13643504063324999118/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:48:05,816 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1b28e221{HTTP/1.1, (http/1.1)}{localhost:34643} 2024-11-14T06:48:05,816 INFO [Time-limited test {}] server.Server(415): Started @237413ms 2024-11-14T06:48:05,817 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
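
The DirectoryScanner warning just above records a configuration fallback: a dfs.datanode.directoryscan.throttle.limit.ms.per.sec value above 1000 ms/sec is rejected and the default of -1 (throttling disabled) is used instead. A minimal sketch of that validation, assuming a plain Hadoop Configuration object; the key name and the 1000/-1 bounds are copied from the WARN line, while the class and method names here (ThrottleCheck, effectiveThrottle) are purely illustrative:

    import org.apache.hadoop.conf.Configuration;

    public final class ThrottleCheck {
      // Key and fallback value taken from the WARN line above; the clamping logic is an assumption.
      static final String KEY = "dfs.datanode.directoryscan.throttle.limit.ms.per.sec";

      static int effectiveThrottle(Configuration conf) {
        int configured = conf.getInt(KEY, -1);
        if (configured > 1000) {
          // Mirrors the log: "set to value above 1000 ms/sec. Assuming default value of -1"
          return -1;
        }
        return configured;
      }
    }
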
2024-11-14T06:48:05,872 WARN [Thread-1989 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/cluster_66668d32-f255-2691-2a87-36d2a9a81533/data/data3/current/BP-2010868496-172.17.0.2-1731566885368/current, will proceed with Du for space computation calculation, 2024-11-14T06:48:05,872 WARN [Thread-1990 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/cluster_66668d32-f255-2691-2a87-36d2a9a81533/data/data4/current/BP-2010868496-172.17.0.2-1731566885368/current, will proceed with Du for space computation calculation, 2024-11-14T06:48:05,888 WARN [Thread-1978 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T06:48:05,890 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x82a1ffc97921a6f7 with lease ID 0x24374bb12259e7a8: Processing first storage report for DS-f2007617-059c-40c0-b894-9f1cd1521dd9 from datanode DatanodeRegistration(127.0.0.1:46407, datanodeUuid=a9cb4c57-78c0-44d3-9f60-ddd309de3458, infoPort=39903, infoSecurePort=0, ipcPort=41735, storageInfo=lv=-57;cid=testClusterID;nsid=2062371966;c=1731566885368) 2024-11-14T06:48:05,890 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x82a1ffc97921a6f7 with lease ID 0x24374bb12259e7a8: from storage DS-f2007617-059c-40c0-b894-9f1cd1521dd9 node DatanodeRegistration(127.0.0.1:46407, datanodeUuid=a9cb4c57-78c0-44d3-9f60-ddd309de3458, infoPort=39903, infoSecurePort=0, ipcPort=41735, storageInfo=lv=-57;cid=testClusterID;nsid=2062371966;c=1731566885368), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:48:05,890 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x82a1ffc97921a6f7 with lease ID 0x24374bb12259e7a8: Processing first storage report for DS-714e424a-f846-4d22-8bbc-02fac909ee94 from datanode DatanodeRegistration(127.0.0.1:46407, datanodeUuid=a9cb4c57-78c0-44d3-9f60-ddd309de3458, infoPort=39903, infoSecurePort=0, ipcPort=41735, storageInfo=lv=-57;cid=testClusterID;nsid=2062371966;c=1731566885368) 2024-11-14T06:48:05,890 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x82a1ffc97921a6f7 with lease ID 0x24374bb12259e7a8: from storage DS-714e424a-f846-4d22-8bbc-02fac909ee94 node DatanodeRegistration(127.0.0.1:46407, datanodeUuid=a9cb4c57-78c0-44d3-9f60-ddd309de3458, infoPort=39903, infoSecurePort=0, ipcPort=41735, storageInfo=lv=-57;cid=testClusterID;nsid=2062371966;c=1731566885368), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:48:05,937 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a 2024-11-14T06:48:05,939 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/cluster_66668d32-f255-2691-2a87-36d2a9a81533/zookeeper_0, clientPort=56956, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/cluster_66668d32-f255-2691-2a87-36d2a9a81533/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/cluster_66668d32-f255-2691-2a87-36d2a9a81533/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T06:48:05,940 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56956 2024-11-14T06:48:05,940 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:48:05,941 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:48:05,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741825_1001 (size=7) 2024-11-14T06:48:05,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741825_1001 (size=7) 2024-11-14T06:48:05,952 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0 with version=8 2024-11-14T06:48:05,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/hbase-staging 2024-11-14T06:48:05,953 INFO [Time-limited test {}] client.ConnectionUtils(128): master/20680646cf8a:0 server-side Connection retries=45 2024-11-14T06:48:05,953 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:48:05,954 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T06:48:05,954 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T06:48:05,954 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:48:05,954 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T06:48:05,954 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T06:48:05,954 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T06:48:05,954 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32979 2024-11-14T06:48:05,956 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:32979 connecting to ZooKeeper ensemble=127.0.0.1:56956 2024-11-14T06:48:05,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:329790x0, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T06:48:05,964 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:32979-0x1003cfd38a10000 connected 2024-11-14T06:48:05,978 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:48:05,979 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:48:05,982 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:48:05,982 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0, hbase.cluster.distributed=false 2024-11-14T06:48:05,983 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T06:48:05,985 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32979 2024-11-14T06:48:05,985 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32979 2024-11-14T06:48:05,987 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32979 2024-11-14T06:48:05,990 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32979 2024-11-14T06:48:05,992 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32979 2024-11-14T06:48:06,006 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/20680646cf8a:0 server-side Connection retries=45 2024-11-14T06:48:06,006 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:48:06,006 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T06:48:06,006 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T06:48:06,006 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:48:06,006 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T06:48:06,006 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T06:48:06,006 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T06:48:06,009 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39097 2024-11-14T06:48:06,010 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39097 connecting to ZooKeeper ensemble=127.0.0.1:56956 2024-11-14T06:48:06,011 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:48:06,012 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:48:06,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:390970x0, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T06:48:06,016 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39097-0x1003cfd38a10001 connected 2024-11-14T06:48:06,016 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39097-0x1003cfd38a10001, quorum=127.0.0.1:56956, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:48:06,016 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T06:48:06,016 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T06:48:06,017 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39097-0x1003cfd38a10001, quorum=127.0.0.1:56956, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T06:48:06,018 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39097-0x1003cfd38a10001, quorum=127.0.0.1:56956, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T06:48:06,019 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39097 2024-11-14T06:48:06,023 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39097 2024-11-14T06:48:06,024 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39097 2024-11-14T06:48:06,024 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39097 2024-11-14T06:48:06,025 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39097 
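
The RpcExecutor lines above describe each call queue as a java.util.concurrent.LinkedBlockingQueue with maxQueueLength=30, drained by a fixed number of handler threads (for example handlerCount=3 for default.FPBQ.Fifo). A rough, self-contained sketch of that producer/consumer shape using only JDK classes; the class name, the Runnable "call" type and the thread naming are illustrative stand-ins, not HBase's actual internals:

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    public final class FifoCallQueueSketch {
      public static void main(String[] args) {
        // One bounded FIFO call queue, as in "numCallQueues=1, maxQueueLength=30".
        BlockingQueue<Runnable> callQueue = new LinkedBlockingQueue<>(30);
        int handlerCount = 3; // "handlerCount=3"
        for (int i = 0; i < handlerCount; i++) {
          Thread handler = new Thread(() -> {
            try {
              while (true) {
                callQueue.take().run(); // block until a call is queued, then execute it
              }
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt();
            }
          }, "default.FPBQ.Fifo.handler=" + i);
          handler.setDaemon(true);
          handler.start();
        }
        callQueue.offer(() -> System.out.println("handled one call")); // enqueue a fake call
      }
    }
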
2024-11-14T06:48:06,037 DEBUG [M:0;20680646cf8a:32979 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;20680646cf8a:32979 2024-11-14T06:48:06,038 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/20680646cf8a,32979,1731566885953 2024-11-14T06:48:06,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x1003cfd38a10001, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T06:48:06,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T06:48:06,039 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/20680646cf8a,32979,1731566885953 2024-11-14T06:48:06,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x1003cfd38a10001, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T06:48:06,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:06,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x1003cfd38a10001, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:06,040 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T06:48:06,041 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/20680646cf8a,32979,1731566885953 from backup master directory 2024-11-14T06:48:06,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/20680646cf8a,32979,1731566885953 2024-11-14T06:48:06,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x1003cfd38a10001, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T06:48:06,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T06:48:06,041 WARN [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
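
The ZKWatcher traffic above is ordinary ZooKeeper watch handling: the master registers itself under /hbase/backup-masters, sets a watch on /hbase/master, and deletes its backup znode once it becomes active. A hedged sketch of setting such a watch with the plain ZooKeeper client; the quorum string and znode paths are copied from the log, the class name and simplified session handling are assumptions:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public final class MasterZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        // Quorum string taken from "quorum=127.0.0.1:56956" in the log above.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:56956", 30000, event -> { });
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("Received ZooKeeper Event, type=" + event.getType()
                + ", path=" + event.getPath());
        // Set a watch whether or not /hbase/master exists yet, as the ZKUtil lines above do.
        zk.exists("/hbase/master", watcher);
        zk.close();
      }
    }
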
2024-11-14T06:48:06,041 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=20680646cf8a,32979,1731566885953 2024-11-14T06:48:06,045 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/hbase.id] with ID: 460ddda4-a294-4efa-ae60-7be59c698400 2024-11-14T06:48:06,045 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/.tmp/hbase.id 2024-11-14T06:48:06,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741826_1002 (size=42) 2024-11-14T06:48:06,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741826_1002 (size=42) 2024-11-14T06:48:06,057 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/.tmp/hbase.id]:[hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/hbase.id] 2024-11-14T06:48:06,068 INFO [master/20680646cf8a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:48:06,068 INFO [master/20680646cf8a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T06:48:06,069 INFO [master/20680646cf8a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
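
The cluster ID bookkeeping above follows a write-then-publish pattern: the ID is first written under .tmp/hbase.id and then moved to its final location, so readers never observe a partially written file. A minimal sketch of that pattern against the Hadoop FileSystem API; the rootdir path and class name below are placeholders, the UUID is the one printed in the log, and only the overall structure mirrors what the log records:

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class ClusterIdFileSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path tmp = new Path("/hbase-rootdir/.tmp/hbase.id");   // placeholder rootdir
        Path target = new Path("/hbase-rootdir/hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("460ddda4-a294-4efa-ae60-7be59c698400".getBytes(StandardCharsets.UTF_8));
        }
        // Publish: the final path only ever holds a complete file.
        if (!fs.rename(tmp, target)) {
          throw new java.io.IOException("rename failed: " + tmp + " -> " + target);
        }
      }
    }
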
2024-11-14T06:48:06,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x1003cfd38a10001, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:06,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:06,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741827_1003 (size=196) 2024-11-14T06:48:06,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741827_1003 (size=196) 2024-11-14T06:48:06,079 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T06:48:06,080 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T06:48:06,080 INFO [master/20680646cf8a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T06:48:06,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741828_1004 (size=1189) 2024-11-14T06:48:06,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741828_1004 (size=1189) 2024-11-14T06:48:06,088 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/data/master/store 2024-11-14T06:48:06,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741829_1005 (size=34) 2024-11-14T06:48:06,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741829_1005 (size=34) 2024-11-14T06:48:06,095 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:48:06,095 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T06:48:06,095 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:48:06,095 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:48:06,095 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T06:48:06,095 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:48:06,095 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
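
The master:store descriptor dumped above pins down each column family's settings, e.g. 'info' with ROW_INDEX_V1 encoding, a ROWCOL bloom filter, IN_MEMORY=true and an 8 KB block size. For orientation, here is a hedged sketch of how an equivalent family could be declared through HBase's public ColumnFamilyDescriptorBuilder API; the values are copied from the printed descriptor, but the class name is invented and this is an illustration, not the code that actually builds the master region:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MasterStoreInfoFamilySketch {
      public static void main(String[] args) {
        // Values copied from the 'info' family in the descriptor logged above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8192)
            .build();
        System.out.println(info);
      }
    }
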
2024-11-14T06:48:06,095 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731566886095Disabling compacts and flushes for region at 1731566886095Disabling writes for close at 1731566886095Writing region close event to WAL at 1731566886095Closed at 1731566886095 2024-11-14T06:48:06,096 WARN [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/data/master/store/.initializing 2024-11-14T06:48:06,096 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/WALs/20680646cf8a,32979,1731566885953 2024-11-14T06:48:06,099 INFO [master/20680646cf8a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=20680646cf8a%2C32979%2C1731566885953, suffix=, logDir=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/WALs/20680646cf8a,32979,1731566885953, archiveDir=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/oldWALs, maxLogs=10 2024-11-14T06:48:06,099 INFO [master/20680646cf8a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C32979%2C1731566885953.1731566886099 2024-11-14T06:48:06,104 INFO [master/20680646cf8a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/WALs/20680646cf8a,32979,1731566885953/20680646cf8a%2C32979%2C1731566885953.1731566886099 2024-11-14T06:48:06,105 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39903:39903),(127.0.0.1/127.0.0.1:42521:42521)] 2024-11-14T06:48:06,109 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T06:48:06,109 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:48:06,109 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:48:06,109 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:48:06,111 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:48:06,112 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T06:48:06,112 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:06,113 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:48:06,113 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:48:06,114 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T06:48:06,114 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:06,115 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:48:06,115 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:48:06,116 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T06:48:06,116 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:06,116 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:48:06,116 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:48:06,118 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T06:48:06,118 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:06,118 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:48:06,118 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:48:06,119 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:48:06,120 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:48:06,121 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:48:06,121 DEBUG [master/20680646cf8a:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:48:06,121 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T06:48:06,123 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:48:06,125 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T06:48:06,125 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=727974, jitterRate=-0.07433359324932098}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T06:48:06,126 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731566886109Initializing all the Stores at 1731566886110 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566886110Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566886111 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566886111Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566886111Cleaning up temporary data from old regions at 1731566886121 (+10 ms)Region opened successfully at 1731566886126 (+5 ms) 2024-11-14T06:48:06,126 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T06:48:06,130 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5209c766, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=20680646cf8a/172.17.0.2:0 2024-11-14T06:48:06,131 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T06:48:06,131 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T06:48:06,131 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T06:48:06,131 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T06:48:06,132 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T06:48:06,132 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T06:48:06,132 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T06:48:06,135 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T06:48:06,135 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T06:48:06,136 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T06:48:06,137 INFO [master/20680646cf8a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T06:48:06,137 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T06:48:06,138 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T06:48:06,138 INFO [master/20680646cf8a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T06:48:06,139 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T06:48:06,139 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T06:48:06,140 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T06:48:06,141 DEBUG 
[master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T06:48:06,143 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T06:48:06,143 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T06:48:06,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T06:48:06,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x1003cfd38a10001, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T06:48:06,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:06,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x1003cfd38a10001, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:06,145 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=20680646cf8a,32979,1731566885953, sessionid=0x1003cfd38a10000, setting cluster-up flag (Was=false) 2024-11-14T06:48:06,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x1003cfd38a10001, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:06,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:06,149 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T06:48:06,150 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=20680646cf8a,32979,1731566885953 2024-11-14T06:48:06,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x1003cfd38a10001, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:06,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:06,154 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T06:48:06,155 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=20680646cf8a,32979,1731566885953 2024-11-14T06:48:06,156 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T06:48:06,158 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T06:48:06,158 INFO [master/20680646cf8a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T06:48:06,158 INFO [master/20680646cf8a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T06:48:06,159 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 20680646cf8a,32979,1731566885953 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T06:48:06,160 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:48:06,160 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:48:06,160 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:48:06,160 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:48:06,160 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/20680646cf8a:0, corePoolSize=10, maxPoolSize=10 2024-11-14T06:48:06,160 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:48:06,160 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/20680646cf8a:0, corePoolSize=2, maxPoolSize=2 2024-11-14T06:48:06,160 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/20680646cf8a:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T06:48:06,162 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T06:48:06,162 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T06:48:06,163 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:06,163 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T06:48:06,166 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731566916166 2024-11-14T06:48:06,166 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T06:48:06,166 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T06:48:06,166 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T06:48:06,166 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T06:48:06,166 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T06:48:06,166 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T06:48:06,166 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:06,167 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T06:48:06,167 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T06:48:06,167 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T06:48:06,167 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T06:48:06,167 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T06:48:06,168 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.large.0-1731566886167,5,FailOnTimeoutGroup] 2024-11-14T06:48:06,168 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.small.0-1731566886168,5,FailOnTimeoutGroup] 2024-11-14T06:48:06,168 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:06,168 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T06:48:06,168 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:06,168 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
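The HMaster(1741) entry just above notes that reopening regions with a very high storeFileRefCount is disabled unless hbase.regions.recovery.store.file.ref.count is given a value greater than 0. A minimal, hedged sketch of flipping that switch programmatically in a test follows; the property name and the "> 0 enables it" rule come from the log message itself, the value 3 is purely illustrative, and in a real deployment the key would normally live in hbase-site.xml rather than be set in code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class StoreFileRefCountSwitchSketch {
        public static void main(String[] args) {
            // Start from the usual HBase configuration (hbase-default.xml + hbase-site.xml).
            Configuration conf = HBaseConfiguration.create();
            // Per the log message, any value > 0 enables the "reopen regions with very
            // high storeFileRefCount" behaviour; 3 is an arbitrary example threshold.
            conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
            System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
        }
    }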
2024-11-14T06:48:06,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741831_1007 (size=1321) 2024-11-14T06:48:06,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741831_1007 (size=1321) 2024-11-14T06:48:06,171 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T06:48:06,171 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0 2024-11-14T06:48:06,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741832_1008 (size=32) 2024-11-14T06:48:06,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741832_1008 (size=32) 2024-11-14T06:48:06,181 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:48:06,184 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T06:48:06,185 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T06:48:06,185 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:06,186 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:48:06,186 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T06:48:06,187 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T06:48:06,187 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:06,188 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:48:06,188 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T06:48:06,189 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T06:48:06,189 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:06,189 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:48:06,189 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T06:48:06,190 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T06:48:06,190 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:06,191 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:48:06,191 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T06:48:06,192 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/1588230740 2024-11-14T06:48:06,192 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/1588230740 2024-11-14T06:48:06,193 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T06:48:06,193 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T06:48:06,194 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
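The FlushLargeStoresPolicy(65) entries above describe a fallback: when hbase.hregion.percolumnfamilyflush.size.lower.bound is absent from the table descriptor, the lower bound becomes the region's memstore flush (heap) size divided by the number of column families. The sketch below only reproduces that arithmetic with numbers visible in this log (flushSize=134217728 and flushSizeLowerBound=33554432 for master:store, 16.0 M here for hbase:meta); the 67108864 input is inferred from 16777216 × 4 and is an assumption, not a quoted value.

    public final class FlushLowerBoundSketch {
        // Fallback described in the log: memstore flush size / number of families.
        static long fallbackLowerBound(long memstoreFlushSizeBytes, int numFamilies) {
            return memstoreFlushSizeBytes / numFamilies;
        }

        public static void main(String[] args) {
            // master:store has 4 families (info, proc, rs, state):
            // 134217728 / 4 = 33554432, the flushSizeLowerBound=33554432 seen earlier.
            System.out.println(fallbackLowerBound(134217728L, 4));
            // hbase:meta has 4 families (info, ns, rep_barrier, table); the 16.0 M figure
            // is consistent with a 64 MB flush size: 67108864 / 4 = 16777216.
            System.out.println(fallbackLowerBound(67108864L, 4));
        }
    }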
2024-11-14T06:48:06,195 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T06:48:06,197 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T06:48:06,197 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=745307, jitterRate=-0.052293941378593445}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T06:48:06,197 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731566886181Initializing all the Stores at 1731566886181Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566886181Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566886184 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566886184Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566886184Cleaning up temporary data from old regions at 1731566886193 (+9 ms)Region opened successfully at 1731566886197 (+4 ms) 2024-11-14T06:48:06,197 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T06:48:06,197 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T06:48:06,198 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T06:48:06,198 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T06:48:06,198 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T06:48:06,201 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T06:48:06,201 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731566886197Disabling compacts and flushes for region at 1731566886197Disabling writes for close at 1731566886198 (+1 ms)Writing 
region close event to WAL at 1731566886201 (+3 ms)Closed at 1731566886201 2024-11-14T06:48:06,202 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T06:48:06,202 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T06:48:06,202 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T06:48:06,204 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T06:48:06,205 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T06:48:06,227 INFO [RS:0;20680646cf8a:39097 {}] regionserver.HRegionServer(746): ClusterId : 460ddda4-a294-4efa-ae60-7be59c698400 2024-11-14T06:48:06,227 DEBUG [RS:0;20680646cf8a:39097 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T06:48:06,229 DEBUG [RS:0;20680646cf8a:39097 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T06:48:06,229 DEBUG [RS:0;20680646cf8a:39097 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T06:48:06,230 DEBUG [RS:0;20680646cf8a:39097 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T06:48:06,231 DEBUG [RS:0;20680646cf8a:39097 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@95966a6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=20680646cf8a/172.17.0.2:0 2024-11-14T06:48:06,240 DEBUG [RS:0;20680646cf8a:39097 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;20680646cf8a:39097 2024-11-14T06:48:06,240 INFO [RS:0;20680646cf8a:39097 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T06:48:06,240 INFO [RS:0;20680646cf8a:39097 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T06:48:06,240 DEBUG [RS:0;20680646cf8a:39097 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-14T06:48:06,241 INFO [RS:0;20680646cf8a:39097 {}] regionserver.HRegionServer(2659): reportForDuty to master=20680646cf8a,32979,1731566885953 with port=39097, startcode=1731566886006 2024-11-14T06:48:06,241 DEBUG [RS:0;20680646cf8a:39097 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T06:48:06,243 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47897, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T06:48:06,244 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32979 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 20680646cf8a,39097,1731566886006 2024-11-14T06:48:06,244 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32979 {}] master.ServerManager(517): Registering regionserver=20680646cf8a,39097,1731566886006 2024-11-14T06:48:06,245 DEBUG [RS:0;20680646cf8a:39097 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0 2024-11-14T06:48:06,245 DEBUG [RS:0;20680646cf8a:39097 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39515 2024-11-14T06:48:06,245 DEBUG [RS:0;20680646cf8a:39097 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T06:48:06,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T06:48:06,247 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [20680646cf8a,39097,1731566886006] 2024-11-14T06:48:06,247 DEBUG [RS:0;20680646cf8a:39097 {}] zookeeper.ZKUtil(111): regionserver:39097-0x1003cfd38a10001, quorum=127.0.0.1:56956, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/20680646cf8a,39097,1731566886006 2024-11-14T06:48:06,247 WARN [RS:0;20680646cf8a:39097 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T06:48:06,247 INFO [RS:0;20680646cf8a:39097 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T06:48:06,247 DEBUG [RS:0;20680646cf8a:39097 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/WALs/20680646cf8a,39097,1731566886006 2024-11-14T06:48:06,255 INFO [RS:0;20680646cf8a:39097 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T06:48:06,257 INFO [RS:0;20680646cf8a:39097 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T06:48:06,258 INFO [RS:0;20680646cf8a:39097 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T06:48:06,258 INFO [RS:0;20680646cf8a:39097 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
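The RegionServerTracker and ZKUtil entries above show the region server registering itself as an ephemeral znode under /hbase/rs, which is how the master learns about live servers and notices when one disappears. Internally HBase goes through its own wrappers (ZKUtil, RecoverableZooKeeper, both named in this log); the following is only a bare ZooKeeper-client illustration of the ephemeral-node mechanism, with a made-up node name and an empty payload.

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public final class EphemeralRegistrationSketch {
        public static void main(String[] args) throws Exception {
            // Connect string mirrors the quorum seen in the log; adjust to your cluster.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:56956", 30000, event -> { });
            // An ephemeral node disappears automatically when this session dies,
            // which is exactly how a crashed region server drops out of /hbase/rs.
            String path = zk.create("/hbase/rs/example-server,1234,0",
                    new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
            System.out.println("registered at " + path);
            zk.close();
        }
    }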
2024-11-14T06:48:06,258 INFO [RS:0;20680646cf8a:39097 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T06:48:06,259 INFO [RS:0;20680646cf8a:39097 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T06:48:06,259 INFO [RS:0;20680646cf8a:39097 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:06,259 DEBUG [RS:0;20680646cf8a:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:48:06,259 DEBUG [RS:0;20680646cf8a:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:48:06,259 DEBUG [RS:0;20680646cf8a:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:48:06,259 DEBUG [RS:0;20680646cf8a:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:48:06,259 DEBUG [RS:0;20680646cf8a:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:48:06,260 DEBUG [RS:0;20680646cf8a:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/20680646cf8a:0, corePoolSize=2, maxPoolSize=2 2024-11-14T06:48:06,260 DEBUG [RS:0;20680646cf8a:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:48:06,260 DEBUG [RS:0;20680646cf8a:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:48:06,260 DEBUG [RS:0;20680646cf8a:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:48:06,260 DEBUG [RS:0;20680646cf8a:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:48:06,260 DEBUG [RS:0;20680646cf8a:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:48:06,260 DEBUG [RS:0;20680646cf8a:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:48:06,260 DEBUG [RS:0;20680646cf8a:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/20680646cf8a:0, corePoolSize=3, maxPoolSize=3 2024-11-14T06:48:06,260 DEBUG [RS:0;20680646cf8a:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0, corePoolSize=3, maxPoolSize=3 2024-11-14T06:48:06,263 INFO [RS:0;20680646cf8a:39097 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
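The many ExecutorService(95) lines above (and the earlier RemoteProcedureDispatcher entry with coreThreads=3, allowCoreThreadTimeOut=true) each describe a pool with a core and maximum size, keyed by event type. As a rough analogue in plain java.util.concurrent, not HBase's own ExecutorService class, one such pool might look like the sketch below, with the sizes copied from the RS_OPEN_REGION line.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public final class EventTypePoolSketch {
        public static void main(String[] args) {
            // corePoolSize=1, maxPoolSize=1, as in the RS_OPEN_REGION executor above.
            ThreadPoolExecutor pool = new ThreadPoolExecutor(
                    1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
            // Letting core threads time out mirrors allowCoreThreadTimeOut=true in the
            // RemoteProcedureDispatcher log entry.
            pool.allowCoreThreadTimeOut(true);
            pool.execute(() -> System.out.println("handling an OPEN_REGION-style event"));
            pool.shutdown();
        }
    }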
2024-11-14T06:48:06,264 INFO [RS:0;20680646cf8a:39097 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:06,264 INFO [RS:0;20680646cf8a:39097 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:06,264 INFO [RS:0;20680646cf8a:39097 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:06,264 INFO [RS:0;20680646cf8a:39097 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:06,264 INFO [RS:0;20680646cf8a:39097 {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,39097,1731566886006-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T06:48:06,278 INFO [RS:0;20680646cf8a:39097 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T06:48:06,278 INFO [RS:0;20680646cf8a:39097 {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,39097,1731566886006-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:06,278 INFO [RS:0;20680646cf8a:39097 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:06,278 INFO [RS:0;20680646cf8a:39097 {}] regionserver.Replication(171): 20680646cf8a,39097,1731566886006 started 2024-11-14T06:48:06,292 INFO [RS:0;20680646cf8a:39097 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:06,292 INFO [RS:0;20680646cf8a:39097 {}] regionserver.HRegionServer(1482): Serving as 20680646cf8a,39097,1731566886006, RpcServer on 20680646cf8a/172.17.0.2:39097, sessionid=0x1003cfd38a10001 2024-11-14T06:48:06,292 DEBUG [RS:0;20680646cf8a:39097 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T06:48:06,292 DEBUG [RS:0;20680646cf8a:39097 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 20680646cf8a,39097,1731566886006 2024-11-14T06:48:06,292 DEBUG [RS:0;20680646cf8a:39097 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '20680646cf8a,39097,1731566886006' 2024-11-14T06:48:06,292 DEBUG [RS:0;20680646cf8a:39097 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T06:48:06,293 DEBUG [RS:0;20680646cf8a:39097 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T06:48:06,293 DEBUG [RS:0;20680646cf8a:39097 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T06:48:06,293 DEBUG [RS:0;20680646cf8a:39097 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T06:48:06,293 DEBUG [RS:0;20680646cf8a:39097 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 20680646cf8a,39097,1731566886006 2024-11-14T06:48:06,293 DEBUG [RS:0;20680646cf8a:39097 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '20680646cf8a,39097,1731566886006' 2024-11-14T06:48:06,293 DEBUG [RS:0;20680646cf8a:39097 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T06:48:06,294 DEBUG 
[RS:0;20680646cf8a:39097 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T06:48:06,294 DEBUG [RS:0;20680646cf8a:39097 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T06:48:06,294 INFO [RS:0;20680646cf8a:39097 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T06:48:06,294 INFO [RS:0;20680646cf8a:39097 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T06:48:06,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:06,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:06,355 WARN [20680646cf8a:32979 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
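The two RecoverLeaseFSUtils(258) warnings above have the shape "java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed" because isFileClosed is invoked reflectively: when a reflectively called method throws, Method.invoke wraps the real failure, and the interesting exception is getCause(). A self-contained sketch of that wrapping follows; the stand-in method and its message are invented for illustration, they are not HBase or HDFS code.

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public final class InvocationTargetSketch {
        // Stand-in for DFSClient#isFileClosed failing after the filesystem was closed.
        public static boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }

        public static void main(String[] args) throws Exception {
            Method m = InvocationTargetSketch.class.getMethod("isFileClosed", String.class);
            try {
                m.invoke(null, "/some/wal/file");
            } catch (InvocationTargetException e) {
                // e itself carries no message (hence ": null" in the log); the real
                // cause is the IOException thrown by the invoked method.
                System.out.println("cause: " + e.getCause());
            }
        }
    }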
2024-11-14T06:48:06,396 INFO [RS:0;20680646cf8a:39097 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=20680646cf8a%2C39097%2C1731566886006, suffix=, logDir=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/WALs/20680646cf8a,39097,1731566886006, archiveDir=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/oldWALs, maxLogs=32 2024-11-14T06:48:06,397 INFO [RS:0;20680646cf8a:39097 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C39097%2C1731566886006.1731566886397 2024-11-14T06:48:06,402 INFO [RS:0;20680646cf8a:39097 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/WALs/20680646cf8a,39097,1731566886006/20680646cf8a%2C39097%2C1731566886006.1731566886397 2024-11-14T06:48:06,403 DEBUG [RS:0;20680646cf8a:39097 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39903:39903),(127.0.0.1/127.0.0.1:42521:42521)] 2024-11-14T06:48:06,605 DEBUG [20680646cf8a:32979 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T06:48:06,605 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=20680646cf8a,39097,1731566886006 2024-11-14T06:48:06,607 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 20680646cf8a,39097,1731566886006, state=OPENING 2024-11-14T06:48:06,608 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T06:48:06,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:06,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x1003cfd38a10001, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:06,609 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T06:48:06,609 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:48:06,609 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:48:06,609 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=20680646cf8a,39097,1731566886006}] 2024-11-14T06:48:06,761 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T06:48:06,763 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44771, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T06:48:06,767 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T06:48:06,767 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T06:48:06,768 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=20680646cf8a%2C39097%2C1731566886006.meta, suffix=.meta, logDir=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/WALs/20680646cf8a,39097,1731566886006, archiveDir=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/oldWALs, maxLogs=32 2024-11-14T06:48:06,769 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C39097%2C1731566886006.meta.1731566886769.meta 2024-11-14T06:48:06,775 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/WALs/20680646cf8a,39097,1731566886006/20680646cf8a%2C39097%2C1731566886006.meta.1731566886769.meta 2024-11-14T06:48:06,776 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42521:42521),(127.0.0.1/127.0.0.1:39903:39903)] 2024-11-14T06:48:06,777 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T06:48:06,777 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T06:48:06,777 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T06:48:06,777 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
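The CoprocessorHost(215) entry above ("Loading coprocessor class ... with path null and priority 536870911") corresponds to the pipe-separated coprocessor$1 attribute shown in the table descriptor earlier, '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|': an empty first field (the log reports path null, so the class presumably comes from the classpath rather than a jar), then the class name, then the priority. A hedged parsing sketch of just that string layout; the field meanings are read off the two log entries, and this is not HBase's actual parser.

    public final class CoprocessorSpecSketch {
        public static void main(String[] args) {
            String spec = "|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|";
            // split keeps empty leading fields: [ "", className, priority ]
            String[] parts = spec.split("\\|");
            String path = parts[0].isEmpty() ? null : parts[0];   // null: no external jar path
            String className = parts[1];
            int priority = Integer.parseInt(parts[2]);
            System.out.println(path + " " + className + " " + priority);
        }
    }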
2024-11-14T06:48:06,777 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T06:48:06,777 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:48:06,778 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T06:48:06,778 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T06:48:06,785 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T06:48:06,786 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T06:48:06,786 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:06,787 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:48:06,787 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T06:48:06,788 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T06:48:06,788 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:06,788 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:48:06,789 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T06:48:06,790 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T06:48:06,790 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:06,790 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:48:06,790 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T06:48:06,791 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T06:48:06,791 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:06,792 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-14T06:48:06,792 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T06:48:06,792 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/1588230740 2024-11-14T06:48:06,794 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/1588230740 2024-11-14T06:48:06,795 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T06:48:06,795 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T06:48:06,795 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T06:48:06,796 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T06:48:06,797 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=705781, jitterRate=-0.10255412757396698}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T06:48:06,797 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T06:48:06,797 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731566886778Writing region info on filesystem at 1731566886778Initializing all the Stores at 1731566886779 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566886779Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566886781 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566886781Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566886781Cleaning up temporary data from old regions at 1731566886795 (+14 ms)Running coprocessor post-open hooks at 1731566886797 (+2 ms)Region opened successfully at 1731566886797 2024-11-14T06:48:06,799 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731566886761 2024-11-14T06:48:06,801 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T06:48:06,801 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T06:48:06,801 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=20680646cf8a,39097,1731566886006 2024-11-14T06:48:06,802 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 20680646cf8a,39097,1731566886006, state=OPEN 2024-11-14T06:48:06,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x1003cfd38a10001, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T06:48:06,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T06:48:06,804 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:48:06,804 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:48:06,804 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=20680646cf8a,39097,1731566886006 2024-11-14T06:48:06,807 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T06:48:06,808 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=20680646cf8a,39097,1731566886006 in 195 msec 2024-11-14T06:48:06,810 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T06:48:06,810 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 605 msec 2024-11-14T06:48:06,811 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T06:48:06,811 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T06:48:06,812 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T06:48:06,812 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=20680646cf8a,39097,1731566886006, seqNum=-1] 2024-11-14T06:48:06,813 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T06:48:06,814 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57921, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T06:48:06,819 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 660 msec 2024-11-14T06:48:06,820 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731566886820, completionTime=-1 2024-11-14T06:48:06,820 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T06:48:06,820 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-14T06:48:06,822 INFO [master/20680646cf8a:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-14T06:48:06,822 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731566946822 2024-11-14T06:48:06,822 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731567006822 2024-11-14T06:48:06,822 INFO [master/20680646cf8a:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-14T06:48:06,823 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,32979,1731566885953-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:06,823 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,32979,1731566885953-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:06,823 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,32979,1731566885953-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:06,823 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-20680646cf8a:32979, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T06:48:06,823 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:06,825 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:06,825 DEBUG [master/20680646cf8a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T06:48:06,829 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.788sec 2024-11-14T06:48:06,829 INFO [master/20680646cf8a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T06:48:06,829 INFO [master/20680646cf8a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T06:48:06,830 INFO [master/20680646cf8a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T06:48:06,830 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-14T06:48:06,830 INFO [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T06:48:06,830 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,32979,1731566885953-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T06:48:06,830 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,32979,1731566885953-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T06:48:06,835 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T06:48:06,835 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T06:48:06,835 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,32979,1731566885953-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T06:48:06,927 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@dcfcbff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T06:48:06,928 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 20680646cf8a,32979,-1 for getting cluster id 2024-11-14T06:48:06,928 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T06:48:06,929 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '460ddda4-a294-4efa-ae60-7be59c698400' 2024-11-14T06:48:06,929 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T06:48:06,929 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "460ddda4-a294-4efa-ae60-7be59c698400" 2024-11-14T06:48:06,929 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@369f8df5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T06:48:06,929 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [20680646cf8a,32979,-1] 2024-11-14T06:48:06,930 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T06:48:06,930 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:48:06,931 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43632, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T06:48:06,932 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ce0a24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T06:48:06,932 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T06:48:06,933 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=20680646cf8a,39097,1731566886006, seqNum=-1] 2024-11-14T06:48:06,933 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T06:48:06,934 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52444, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T06:48:06,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=20680646cf8a,32979,1731566885953 2024-11-14T06:48:06,936 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:48:06,938 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T06:48:06,939 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-14T06:48:06,940 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 20680646cf8a,32979,1731566885953 2024-11-14T06:48:06,940 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@77682725 2024-11-14T06:48:06,940 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-14T06:48:06,941 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43646, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-14T06:48:06,942 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32979 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-14T06:48:06,942 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32979 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-14T06:48:06,942 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32979 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T06:48:06,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32979 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-14T06:48:06,945 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-14T06:48:06,945 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:06,946 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32979 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-14T06:48:06,947 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-14T06:48:06,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32979 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T06:48:06,956 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741835_1011 (size=381) 2024-11-14T06:48:06,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741835_1011 (size=381) 2024-11-14T06:48:06,958 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => aa84aa8e79ae0ab4ee31e90c0e08f200, NAME => 'TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0 2024-11-14T06:48:06,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741836_1012 (size=64) 2024-11-14T06:48:06,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741836_1012 (size=64) 2024-11-14T06:48:06,970 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:48:06,970 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing aa84aa8e79ae0ab4ee31e90c0e08f200, disabling compactions & flushes 2024-11-14T06:48:06,970 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200. 2024-11-14T06:48:06,970 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200. 2024-11-14T06:48:06,971 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200. after waiting 0 ms 2024-11-14T06:48:06,971 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200. 2024-11-14T06:48:06,971 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200. 
2024-11-14T06:48:06,971 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for aa84aa8e79ae0ab4ee31e90c0e08f200: Waiting for close lock at 1731566886970Disabling compacts and flushes for region at 1731566886970Disabling writes for close at 1731566886971 (+1 ms)Writing region close event to WAL at 1731566886971Closed at 1731566886971 2024-11-14T06:48:06,972 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-14T06:48:06,972 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731566886972"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731566886972"}]},"ts":"1731566886972"} 2024-11-14T06:48:06,975 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-14T06:48:06,976 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-14T06:48:06,976 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731566886976"}]},"ts":"1731566886976"} 2024-11-14T06:48:06,978 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-14T06:48:06,979 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aa84aa8e79ae0ab4ee31e90c0e08f200, ASSIGN}] 2024-11-14T06:48:06,980 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aa84aa8e79ae0ab4ee31e90c0e08f200, ASSIGN 2024-11-14T06:48:06,981 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aa84aa8e79ae0ab4ee31e90c0e08f200, ASSIGN; state=OFFLINE, location=20680646cf8a,39097,1731566886006; forceNewPlan=false, retain=false 2024-11-14T06:48:07,132 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=aa84aa8e79ae0ab4ee31e90c0e08f200, regionState=OPENING, regionLocation=20680646cf8a,39097,1731566886006 2024-11-14T06:48:07,135 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aa84aa8e79ae0ab4ee31e90c0e08f200, ASSIGN because future has completed 2024-11-14T06:48:07,135 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure aa84aa8e79ae0ab4ee31e90c0e08f200, 
server=20680646cf8a,39097,1731566886006}] 2024-11-14T06:48:07,293 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200. 2024-11-14T06:48:07,293 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => aa84aa8e79ae0ab4ee31e90c0e08f200, NAME => 'TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200.', STARTKEY => '', ENDKEY => ''} 2024-11-14T06:48:07,294 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling aa84aa8e79ae0ab4ee31e90c0e08f200 2024-11-14T06:48:07,294 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:48:07,294 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for aa84aa8e79ae0ab4ee31e90c0e08f200 2024-11-14T06:48:07,294 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for aa84aa8e79ae0ab4ee31e90c0e08f200 2024-11-14T06:48:07,295 INFO [StoreOpener-aa84aa8e79ae0ab4ee31e90c0e08f200-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region aa84aa8e79ae0ab4ee31e90c0e08f200 2024-11-14T06:48:07,297 INFO [StoreOpener-aa84aa8e79ae0ab4ee31e90c0e08f200-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region aa84aa8e79ae0ab4ee31e90c0e08f200 columnFamilyName info 2024-11-14T06:48:07,297 DEBUG [StoreOpener-aa84aa8e79ae0ab4ee31e90c0e08f200-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:07,297 INFO [StoreOpener-aa84aa8e79ae0ab4ee31e90c0e08f200-1 {}] regionserver.HStore(327): Store=aa84aa8e79ae0ab4ee31e90c0e08f200/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:48:07,297 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for aa84aa8e79ae0ab4ee31e90c0e08f200 2024-11-14T06:48:07,298 DEBUG 
[RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200 2024-11-14T06:48:07,298 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200 2024-11-14T06:48:07,299 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for aa84aa8e79ae0ab4ee31e90c0e08f200 2024-11-14T06:48:07,299 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for aa84aa8e79ae0ab4ee31e90c0e08f200 2024-11-14T06:48:07,300 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for aa84aa8e79ae0ab4ee31e90c0e08f200 2024-11-14T06:48:07,302 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T06:48:07,303 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened aa84aa8e79ae0ab4ee31e90c0e08f200; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=832619, jitterRate=0.05872991681098938}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T06:48:07,303 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for aa84aa8e79ae0ab4ee31e90c0e08f200 2024-11-14T06:48:07,304 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for aa84aa8e79ae0ab4ee31e90c0e08f200: Running coprocessor pre-open hook at 1731566887294Writing region info on filesystem at 1731566887294Initializing all the Stores at 1731566887295 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566887295Cleaning up temporary data from old regions at 1731566887299 (+4 ms)Running coprocessor post-open hooks at 1731566887303 (+4 ms)Region opened successfully at 1731566887303 2024-11-14T06:48:07,305 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200., pid=6, masterSystemTime=1731566887289 2024-11-14T06:48:07,307 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200. 2024-11-14T06:48:07,307 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200. 2024-11-14T06:48:07,308 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=aa84aa8e79ae0ab4ee31e90c0e08f200, regionState=OPEN, openSeqNum=2, regionLocation=20680646cf8a,39097,1731566886006 2024-11-14T06:48:07,310 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure aa84aa8e79ae0ab4ee31e90c0e08f200, server=20680646cf8a,39097,1731566886006 because future has completed 2024-11-14T06:48:07,313 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-14T06:48:07,313 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure aa84aa8e79ae0ab4ee31e90c0e08f200, server=20680646cf8a,39097,1731566886006 in 176 msec 2024-11-14T06:48:07,315 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-14T06:48:07,316 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aa84aa8e79ae0ab4ee31e90c0e08f200, ASSIGN in 334 msec 2024-11-14T06:48:07,317 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-14T06:48:07,317 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731566887317"}]},"ts":"1731566887317"} 2024-11-14T06:48:07,320 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-14T06:48:07,322 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-14T06:48:07,324 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 380 msec 2024-11-14T06:48:07,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:07,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:08,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:08,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:09,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:09,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:09,641 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:09,641 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:09,641 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:09,641 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:09,641 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:09,642 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:09,642 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:09,642 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:09,665 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:09,666 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:09,666 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:09,666 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:09,667 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:09,667 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:09,671 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:09,671 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:09,671 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:09,673 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:10,178 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T06:48:10,178 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:10,179 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:10,179 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:10,179 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:10,179 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:10,180 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:10,181 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:10,181 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:10,217 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:10,217 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:10,217 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:10,218 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:10,218 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:10,218 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:10,222 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:10,222 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:10,223 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:10,225 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:10,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:10,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:11,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:11,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:12,256 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T06:48:12,256 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-14T06:48:12,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:12,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:13,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:13,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:14,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:14,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:48:14,528 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-14T06:48:14,528 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-14T06:48:14,529 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T06:48:15,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:15,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:16,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:16,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:48:16,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32979 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T06:48:16,974 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-14T06:48:16,974 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-14T06:48:16,977 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-14T06:48:16,977 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200. 2024-11-14T06:48:16,980 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200., hostname=20680646cf8a,39097,1731566886006, seqNum=2] 2024-11-14T06:48:16,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(8855): Flush requested on aa84aa8e79ae0ab4ee31e90c0e08f200 2024-11-14T06:48:16,996 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing aa84aa8e79ae0ab4ee31e90c0e08f200 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T06:48:17,016 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/.tmp/info/de80576000774fb5872c194fe489e579 is 1080, key is row0001/info:/1731566896981/Put/seqid=0 2024-11-14T06:48:17,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741837_1013 (size=12509) 2024-11-14T06:48:17,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741837_1013 (size=12509) 2024-11-14T06:48:17,022 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/.tmp/info/de80576000774fb5872c194fe489e579 2024-11-14T06:48:17,028 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/.tmp/info/de80576000774fb5872c194fe489e579 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/de80576000774fb5872c194fe489e579 2024-11-14T06:48:17,035 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/de80576000774fb5872c194fe489e579, entries=7, sequenceid=11, filesize=12.2 K 
2024-11-14T06:48:17,036 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=18.91 KB/19368 for aa84aa8e79ae0ab4ee31e90c0e08f200 in 40ms, sequenceid=11, compaction requested=false 2024-11-14T06:48:17,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for aa84aa8e79ae0ab4ee31e90c0e08f200: 2024-11-14T06:48:17,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(8855): Flush requested on aa84aa8e79ae0ab4ee31e90c0e08f200 2024-11-14T06:48:17,037 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing aa84aa8e79ae0ab4ee31e90c0e08f200 1/1 column families, dataSize=19.96 KB heapSize=21.63 KB 2024-11-14T06:48:17,042 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/.tmp/info/9b2ba342adc04132a8c500c3394f2a52 is 1080, key is row0008/info:/1731566896997/Put/seqid=0 2024-11-14T06:48:17,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741838_1014 (size=25453) 2024-11-14T06:48:17,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741838_1014 (size=25453) 2024-11-14T06:48:17,048 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=19.96 KB at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/.tmp/info/9b2ba342adc04132a8c500c3394f2a52 2024-11-14T06:48:17,053 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/.tmp/info/9b2ba342adc04132a8c500c3394f2a52 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/9b2ba342adc04132a8c500c3394f2a52 2024-11-14T06:48:17,059 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/9b2ba342adc04132a8c500c3394f2a52, entries=19, sequenceid=33, filesize=24.9 K 2024-11-14T06:48:17,060 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~19.96 KB/20444, heapSize ~21.61 KB/22128, currentSize=6.30 KB/6456 for aa84aa8e79ae0ab4ee31e90c0e08f200 in 23ms, sequenceid=33, compaction requested=false 2024-11-14T06:48:17,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for aa84aa8e79ae0ab4ee31e90c0e08f200: 2024-11-14T06:48:17,060 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=37.1 K, sizeToCheck=16.0 K 2024-11-14T06:48:17,060 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:48:17,060 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/9b2ba342adc04132a8c500c3394f2a52 because midkey is the same as first or last row 2024-11-14T06:48:17,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:17,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:18,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:18,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:48:19,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(8855): Flush requested on aa84aa8e79ae0ab4ee31e90c0e08f200 2024-11-14T06:48:19,054 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing aa84aa8e79ae0ab4ee31e90c0e08f200 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T06:48:19,060 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/.tmp/info/3cb73d3d3e3149d581b72ba3e6670172 is 1080, key is row0027/info:/1731566897038/Put/seqid=0 2024-11-14T06:48:19,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741839_1015 (size=12509) 2024-11-14T06:48:19,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741839_1015 (size=12509) 2024-11-14T06:48:19,066 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/.tmp/info/3cb73d3d3e3149d581b72ba3e6670172 2024-11-14T06:48:19,073 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/.tmp/info/3cb73d3d3e3149d581b72ba3e6670172 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/3cb73d3d3e3149d581b72ba3e6670172 2024-11-14T06:48:19,080 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/3cb73d3d3e3149d581b72ba3e6670172, entries=7, sequenceid=43, filesize=12.2 K 2024-11-14T06:48:19,081 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for aa84aa8e79ae0ab4ee31e90c0e08f200 in 27ms, sequenceid=43, compaction requested=true 2024-11-14T06:48:19,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for aa84aa8e79ae0ab4ee31e90c0e08f200: 2024-11-14T06:48:19,081 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=49.3 K, sizeToCheck=16.0 K 2024-11-14T06:48:19,081 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:48:19,081 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/9b2ba342adc04132a8c500c3394f2a52 because midkey is the same as first or last row 2024-11-14T06:48:19,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store aa84aa8e79ae0ab4ee31e90c0e08f200:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-14T06:48:19,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:48:19,082 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T06:48:19,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(8855): Flush requested on aa84aa8e79ae0ab4ee31e90c0e08f200 2024-11-14T06:48:19,083 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 50471 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T06:48:19,083 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1541): aa84aa8e79ae0ab4ee31e90c0e08f200/info is initiating minor compaction (all files) 2024-11-14T06:48:19,083 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing aa84aa8e79ae0ab4ee31e90c0e08f200 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-14T06:48:19,083 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of aa84aa8e79ae0ab4ee31e90c0e08f200/info in TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200. 2024-11-14T06:48:19,083 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/de80576000774fb5872c194fe489e579, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/9b2ba342adc04132a8c500c3394f2a52, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/3cb73d3d3e3149d581b72ba3e6670172] into tmpdir=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/.tmp, totalSize=49.3 K 2024-11-14T06:48:19,084 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting de80576000774fb5872c194fe489e579, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731566896981 2024-11-14T06:48:19,084 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9b2ba342adc04132a8c500c3394f2a52, keycount=19, bloomtype=ROW, size=24.9 K, encoding=NONE, compression=NONE, seqNum=33, earliestPutTs=1731566896997 2024-11-14T06:48:19,084 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3cb73d3d3e3149d581b72ba3e6670172, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1731566897038 2024-11-14T06:48:19,088 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/.tmp/info/8dd3be4ddce145149868c293c76f6462 is 1080, key is row0034/info:/1731566899055/Put/seqid=0 
2024-11-14T06:48:19,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741840_1016 (size=18987)
2024-11-14T06:48:19,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741840_1016 (size=18987)
2024-11-14T06:48:19,105 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): aa84aa8e79ae0ab4ee31e90c0e08f200#info#compaction#60 average throughput is 16.93 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-14T06:48:19,105 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/.tmp/info/c3fee9a959d74851ad1b2981fcf8e0f9 is 1080, key is row0001/info:/1731566896981/Put/seqid=0
2024-11-14T06:48:19,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741841_1017 (size=40670)
2024-11-14T06:48:19,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741841_1017 (size=40670)
2024-11-14T06:48:19,116 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/.tmp/info/c3fee9a959d74851ad1b2981fcf8e0f9 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/c3fee9a959d74851ad1b2981fcf8e0f9
2024-11-14T06:48:19,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=aa84aa8e79ae0ab4ee31e90c0e08f200, server=20680646cf8a,39097,1731566886006
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-14T06:48:19,123 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in aa84aa8e79ae0ab4ee31e90c0e08f200/info of aa84aa8e79ae0ab4ee31e90c0e08f200 into c3fee9a959d74851ad1b2981fcf8e0f9(size=39.7 K), total size for store is 39.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T06:48:19,123 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for aa84aa8e79ae0ab4ee31e90c0e08f200: 2024-11-14T06:48:19,123 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200., storeName=aa84aa8e79ae0ab4ee31e90c0e08f200/info, priority=13, startTime=1731566899081; duration=0sec 2024-11-14T06:48:19,123 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.7 K, sizeToCheck=16.0 K 2024-11-14T06:48:19,123 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:48:19,123 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/c3fee9a959d74851ad1b2981fcf8e0f9 because midkey is the same as first or last row 2024-11-14T06:48:19,123 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.7 K, sizeToCheck=16.0 K 2024-11-14T06:48:19,123 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:48:19,123 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/c3fee9a959d74851ad1b2981fcf8e0f9 because midkey is the same as first or last row 2024-11-14T06:48:19,123 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.7 K, sizeToCheck=16.0 K 2024-11-14T06:48:19,123 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:48:19,123 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/c3fee9a959d74851ad1b2981fcf8e0f9 because midkey is the same as first or last row 2024-11-14T06:48:19,123 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:48:19,123 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: aa84aa8e79ae0ab4ee31e90c0e08f200:info 2024-11-14T06:48:19,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:52444 deadline: 1731566909120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=aa84aa8e79ae0ab4ee31e90c0e08f200, server=20680646cf8a,39097,1731566886006 2024-11-14T06:48:19,142 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200., hostname=20680646cf8a,39097,1731566886006, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200., hostname=20680646cf8a,39097,1731566886006, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=aa84aa8e79ae0ab4ee31e90c0e08f200, server=20680646cf8a,39097,1731566886006 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T06:48:19,143 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200., hostname=20680646cf8a,39097,1731566886006, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=aa84aa8e79ae0ab4ee31e90c0e08f200, server=20680646cf8a,39097,1731566886006 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T06:48:19,143 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200., hostname=20680646cf8a,39097,1731566886006, seqNum=2 because the exception is null or not the one we care about 2024-11-14T06:48:19,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:19,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:19,504 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/.tmp/info/8dd3be4ddce145149868c293c76f6462 2024-11-14T06:48:19,516 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/.tmp/info/8dd3be4ddce145149868c293c76f6462 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/8dd3be4ddce145149868c293c76f6462 2024-11-14T06:48:19,524 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/8dd3be4ddce145149868c293c76f6462, entries=13, sequenceid=59, filesize=18.5 K 2024-11-14T06:48:19,525 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=16.81 KB/17216 for aa84aa8e79ae0ab4ee31e90c0e08f200 in 442ms, sequenceid=59, compaction requested=false 2024-11-14T06:48:19,525 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for aa84aa8e79ae0ab4ee31e90c0e08f200: 2024-11-14T06:48:19,525 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=58.3 
K, sizeToCheck=16.0 K 2024-11-14T06:48:19,525 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:48:19,525 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/c3fee9a959d74851ad1b2981fcf8e0f9 because midkey is the same as first or last row 2024-11-14T06:48:20,033 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T06:48:20,035 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:20,035 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:20,036 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:20,036 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:20,036 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:20,037 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:20,038 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:20,038 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:20,064 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:20,064 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:20,064 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:20,065 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:20,065 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:20,065 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:20,068 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:20,068 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:20,069 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:20,071 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:20,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:48:20,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:21,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:21,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:48:22,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:22,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:23,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:48:23,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:24,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:24,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:48:25,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:25,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:26,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:48:26,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:27,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:27,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:48:28,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:28,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:29,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(8855): Flush requested on aa84aa8e79ae0ab4ee31e90c0e08f200 2024-11-14T06:48:29,207 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing aa84aa8e79ae0ab4ee31e90c0e08f200 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-14T06:48:29,215 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/.tmp/info/70172a59f88041df8880a4f13859fd3e is 1080, key is row0047/info:/1731566899084/Put/seqid=0 2024-11-14T06:48:29,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741842_1018 (size=23299) 2024-11-14T06:48:29,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741842_1018 (size=23299) 2024-11-14T06:48:29,221 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/.tmp/info/70172a59f88041df8880a4f13859fd3e 2024-11-14T06:48:29,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/.tmp/info/70172a59f88041df8880a4f13859fd3e as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/70172a59f88041df8880a4f13859fd3e 2024-11-14T06:48:29,232 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/70172a59f88041df8880a4f13859fd3e, entries=17, sequenceid=80, filesize=22.8 K 2024-11-14T06:48:29,233 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, 
currentSize=1.05 KB/1076 for aa84aa8e79ae0ab4ee31e90c0e08f200 in 26ms, sequenceid=80, compaction requested=true 2024-11-14T06:48:29,233 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for aa84aa8e79ae0ab4ee31e90c0e08f200: 2024-11-14T06:48:29,234 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=81.0 K, sizeToCheck=16.0 K 2024-11-14T06:48:29,234 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:48:29,234 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/c3fee9a959d74851ad1b2981fcf8e0f9 because midkey is the same as first or last row 2024-11-14T06:48:29,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store aa84aa8e79ae0ab4ee31e90c0e08f200:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T06:48:29,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:48:29,234 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T06:48:29,235 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 82956 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T06:48:29,235 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1541): aa84aa8e79ae0ab4ee31e90c0e08f200/info is initiating minor compaction (all files) 2024-11-14T06:48:29,235 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of aa84aa8e79ae0ab4ee31e90c0e08f200/info in TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200. 
2024-11-14T06:48:29,235 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/c3fee9a959d74851ad1b2981fcf8e0f9, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/8dd3be4ddce145149868c293c76f6462, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/70172a59f88041df8880a4f13859fd3e] into tmpdir=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/.tmp, totalSize=81.0 K 2024-11-14T06:48:29,236 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting c3fee9a959d74851ad1b2981fcf8e0f9, keycount=33, bloomtype=ROW, size=39.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1731566896981 2024-11-14T06:48:29,236 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8dd3be4ddce145149868c293c76f6462, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=59, earliestPutTs=1731566899055 2024-11-14T06:48:29,237 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting 70172a59f88041df8880a4f13859fd3e, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1731566899084 2024-11-14T06:48:29,251 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): aa84aa8e79ae0ab4ee31e90c0e08f200#info#compaction#62 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T06:48:29,252 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/.tmp/info/34e9b51ef2194bb78ee2ae209ab3a4ad is 1080, key is row0001/info:/1731566896981/Put/seqid=0 2024-11-14T06:48:29,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741843_1019 (size=73224) 2024-11-14T06:48:29,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741843_1019 (size=73224) 2024-11-14T06:48:29,261 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/.tmp/info/34e9b51ef2194bb78ee2ae209ab3a4ad as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/34e9b51ef2194bb78ee2ae209ab3a4ad 2024-11-14T06:48:29,267 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in aa84aa8e79ae0ab4ee31e90c0e08f200/info of aa84aa8e79ae0ab4ee31e90c0e08f200 into 34e9b51ef2194bb78ee2ae209ab3a4ad(size=71.5 K), total size for store is 71.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T06:48:29,267 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for aa84aa8e79ae0ab4ee31e90c0e08f200: 2024-11-14T06:48:29,267 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200., storeName=aa84aa8e79ae0ab4ee31e90c0e08f200/info, priority=13, startTime=1731566909234; duration=0sec 2024-11-14T06:48:29,267 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-11-14T06:48:29,267 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:48:29,267 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-11-14T06:48:29,267 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:48:29,267 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-11-14T06:48:29,267 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T06:48:29,268 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200., 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:48:29,268 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:48:29,268 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: aa84aa8e79ae0ab4ee31e90c0e08f200:info 2024-11-14T06:48:29,269 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32979 {}] assignment.AssignmentManager(1363): Split request from 20680646cf8a,39097,1731566886006, parent={ENCODED => aa84aa8e79ae0ab4ee31e90c0e08f200, NAME => 'TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-14T06:48:29,273 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32979 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=20680646cf8a,39097,1731566886006 2024-11-14T06:48:29,277 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32979 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=aa84aa8e79ae0ab4ee31e90c0e08f200, daughterA=6cf8faecc28e15c306ac8f738165ab4b, daughterB=f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:29,278 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=aa84aa8e79ae0ab4ee31e90c0e08f200, daughterA=6cf8faecc28e15c306ac8f738165ab4b, daughterB=f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:29,278 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=aa84aa8e79ae0ab4ee31e90c0e08f200, daughterA=6cf8faecc28e15c306ac8f738165ab4b, daughterB=f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:29,278 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=aa84aa8e79ae0ab4ee31e90c0e08f200, daughterA=6cf8faecc28e15c306ac8f738165ab4b, daughterB=f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:29,284 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aa84aa8e79ae0ab4ee31e90c0e08f200, UNASSIGN}] 2024-11-14T06:48:29,285 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aa84aa8e79ae0ab4ee31e90c0e08f200, UNASSIGN 2024-11-14T06:48:29,286 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=aa84aa8e79ae0ab4ee31e90c0e08f200, regionState=CLOSING, regionLocation=20680646cf8a,39097,1731566886006 2024-11-14T06:48:29,288 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aa84aa8e79ae0ab4ee31e90c0e08f200, UNASSIGN because future has completed 2024-11-14T06:48:29,289 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-14T06:48:29,289 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure aa84aa8e79ae0ab4ee31e90c0e08f200, server=20680646cf8a,39097,1731566886006}] 2024-11-14T06:48:29,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:29,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:29,448 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close aa84aa8e79ae0ab4ee31e90c0e08f200 2024-11-14T06:48:29,448 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-14T06:48:29,449 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing aa84aa8e79ae0ab4ee31e90c0e08f200, disabling compactions & flushes 2024-11-14T06:48:29,449 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200. 2024-11-14T06:48:29,449 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200. 2024-11-14T06:48:29,449 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200. after waiting 0 ms 2024-11-14T06:48:29,449 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200. 
2024-11-14T06:48:29,449 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing aa84aa8e79ae0ab4ee31e90c0e08f200 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-14T06:48:29,455 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/.tmp/info/619eed19b1b54a86a469b7fc44cededb is 1080, key is row0064/info:/1731566909210/Put/seqid=0 2024-11-14T06:48:29,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741844_1020 (size=6033) 2024-11-14T06:48:29,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741844_1020 (size=6033) 2024-11-14T06:48:29,460 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=85 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/.tmp/info/619eed19b1b54a86a469b7fc44cededb 2024-11-14T06:48:29,465 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/.tmp/info/619eed19b1b54a86a469b7fc44cededb as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/619eed19b1b54a86a469b7fc44cededb 2024-11-14T06:48:29,471 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/619eed19b1b54a86a469b7fc44cededb, entries=1, sequenceid=85, filesize=5.9 K 2024-11-14T06:48:29,473 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for aa84aa8e79ae0ab4ee31e90c0e08f200 in 23ms, sequenceid=85, compaction requested=false 2024-11-14T06:48:29,474 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/de80576000774fb5872c194fe489e579, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/9b2ba342adc04132a8c500c3394f2a52, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/c3fee9a959d74851ad1b2981fcf8e0f9, 
hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/3cb73d3d3e3149d581b72ba3e6670172, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/8dd3be4ddce145149868c293c76f6462, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/70172a59f88041df8880a4f13859fd3e] to archive 2024-11-14T06:48:29,474 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-14T06:48:29,477 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/de80576000774fb5872c194fe489e579 to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/de80576000774fb5872c194fe489e579 2024-11-14T06:48:29,478 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/9b2ba342adc04132a8c500c3394f2a52 to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/9b2ba342adc04132a8c500c3394f2a52 2024-11-14T06:48:29,479 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/c3fee9a959d74851ad1b2981fcf8e0f9 to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/c3fee9a959d74851ad1b2981fcf8e0f9 2024-11-14T06:48:29,480 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/3cb73d3d3e3149d581b72ba3e6670172 to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/3cb73d3d3e3149d581b72ba3e6670172 2024-11-14T06:48:29,481 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/8dd3be4ddce145149868c293c76f6462 to 
hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/8dd3be4ddce145149868c293c76f6462 2024-11-14T06:48:29,482 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/70172a59f88041df8880a4f13859fd3e to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/70172a59f88041df8880a4f13859fd3e 2024-11-14T06:48:29,488 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=1 2024-11-14T06:48:29,489 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200. 2024-11-14T06:48:29,489 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for aa84aa8e79ae0ab4ee31e90c0e08f200: Waiting for close lock at 1731566909449Running coprocessor pre-close hooks at 1731566909449Disabling compacts and flushes for region at 1731566909449Disabling writes for close at 1731566909449Obtaining lock to block concurrent updates at 1731566909449Preparing flush snapshotting stores in aa84aa8e79ae0ab4ee31e90c0e08f200 at 1731566909449Finished memstore snapshotting TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731566909450 (+1 ms)Flushing stores of TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200. 
at 1731566909451 (+1 ms)Flushing aa84aa8e79ae0ab4ee31e90c0e08f200/info: creating writer at 1731566909451Flushing aa84aa8e79ae0ab4ee31e90c0e08f200/info: appending metadata at 1731566909454 (+3 ms)Flushing aa84aa8e79ae0ab4ee31e90c0e08f200/info: closing flushed file at 1731566909454Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@78e70588: reopening flushed file at 1731566909465 (+11 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for aa84aa8e79ae0ab4ee31e90c0e08f200 in 23ms, sequenceid=85, compaction requested=false at 1731566909473 (+8 ms)Writing region close event to WAL at 1731566909485 (+12 ms)Running coprocessor post-close hooks at 1731566909489 (+4 ms)Closed at 1731566909489 2024-11-14T06:48:29,491 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed aa84aa8e79ae0ab4ee31e90c0e08f200 2024-11-14T06:48:29,491 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=aa84aa8e79ae0ab4ee31e90c0e08f200, regionState=CLOSED 2024-11-14T06:48:29,493 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure aa84aa8e79ae0ab4ee31e90c0e08f200, server=20680646cf8a,39097,1731566886006 because future has completed 2024-11-14T06:48:29,496 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-14T06:48:29,496 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure aa84aa8e79ae0ab4ee31e90c0e08f200, server=20680646cf8a,39097,1731566886006 in 205 msec 2024-11-14T06:48:29,498 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-14T06:48:29,498 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aa84aa8e79ae0ab4ee31e90c0e08f200, UNASSIGN in 212 msec 2024-11-14T06:48:29,506 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:29,509 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 2 storefiles, region=aa84aa8e79ae0ab4ee31e90c0e08f200, threads=2 2024-11-14T06:48:29,511 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/34e9b51ef2194bb78ee2ae209ab3a4ad for region: aa84aa8e79ae0ab4ee31e90c0e08f200 2024-11-14T06:48:29,511 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/619eed19b1b54a86a469b7fc44cededb for region: aa84aa8e79ae0ab4ee31e90c0e08f200 2024-11-14T06:48:29,520 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for 
hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/619eed19b1b54a86a469b7fc44cededb, top=true 2024-11-14T06:48:29,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741845_1021 (size=27) 2024-11-14T06:48:29,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741845_1021 (size=27) 2024-11-14T06:48:29,526 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/TestLogRolling-testLogRolling=aa84aa8e79ae0ab4ee31e90c0e08f200-619eed19b1b54a86a469b7fc44cededb for child: f1a89a5774b34afce0f3d8eb00754b02, parent: aa84aa8e79ae0ab4ee31e90c0e08f200 2024-11-14T06:48:29,526 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/619eed19b1b54a86a469b7fc44cededb for region: aa84aa8e79ae0ab4ee31e90c0e08f200 2024-11-14T06:48:29,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741846_1022 (size=27) 2024-11-14T06:48:29,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741846_1022 (size=27) 2024-11-14T06:48:29,533 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/34e9b51ef2194bb78ee2ae209ab3a4ad for region: aa84aa8e79ae0ab4ee31e90c0e08f200 2024-11-14T06:48:29,535 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region aa84aa8e79ae0ab4ee31e90c0e08f200 Daughter A: [hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/6cf8faecc28e15c306ac8f738165ab4b/info/34e9b51ef2194bb78ee2ae209ab3a4ad.aa84aa8e79ae0ab4ee31e90c0e08f200] storefiles, Daughter B: [hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/34e9b51ef2194bb78ee2ae209ab3a4ad.aa84aa8e79ae0ab4ee31e90c0e08f200, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/TestLogRolling-testLogRolling=aa84aa8e79ae0ab4ee31e90c0e08f200-619eed19b1b54a86a469b7fc44cededb] storefiles. 
2024-11-14T06:48:29,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741847_1023 (size=71) 2024-11-14T06:48:29,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741847_1023 (size=71) 2024-11-14T06:48:29,544 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:29,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741848_1024 (size=71) 2024-11-14T06:48:29,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741848_1024 (size=71) 2024-11-14T06:48:29,556 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:29,564 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/6cf8faecc28e15c306ac8f738165ab4b/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=-1 2024-11-14T06:48:29,565 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=-1 2024-11-14T06:48:29,568 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731566909567"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731566909567"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731566909567"}]},"ts":"1731566909567"} 2024-11-14T06:48:29,568 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731566909273.6cf8faecc28e15c306ac8f738165ab4b.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731566909567"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731566909567"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731566909567"}]},"ts":"1731566909567"} 2024-11-14T06:48:29,568 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731566909567"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731566909567"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731566909567"}]},"ts":"1731566909567"} 2024-11-14T06:48:29,584 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6cf8faecc28e15c306ac8f738165ab4b, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=f1a89a5774b34afce0f3d8eb00754b02, ASSIGN}] 2024-11-14T06:48:29,585 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6cf8faecc28e15c306ac8f738165ab4b, ASSIGN 2024-11-14T06:48:29,585 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f1a89a5774b34afce0f3d8eb00754b02, ASSIGN 2024-11-14T06:48:29,586 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6cf8faecc28e15c306ac8f738165ab4b, ASSIGN; state=SPLITTING_NEW, location=20680646cf8a,39097,1731566886006; forceNewPlan=false, retain=false 2024-11-14T06:48:29,586 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f1a89a5774b34afce0f3d8eb00754b02, ASSIGN; state=SPLITTING_NEW, location=20680646cf8a,39097,1731566886006; forceNewPlan=false, retain=false 2024-11-14T06:48:29,736 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=f1a89a5774b34afce0f3d8eb00754b02, regionState=OPENING, regionLocation=20680646cf8a,39097,1731566886006 2024-11-14T06:48:29,736 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=6cf8faecc28e15c306ac8f738165ab4b, regionState=OPENING, regionLocation=20680646cf8a,39097,1731566886006 2024-11-14T06:48:29,738 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6cf8faecc28e15c306ac8f738165ab4b, ASSIGN because future has completed 2024-11-14T06:48:29,739 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6cf8faecc28e15c306ac8f738165ab4b, server=20680646cf8a,39097,1731566886006}] 2024-11-14T06:48:29,739 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f1a89a5774b34afce0f3d8eb00754b02, ASSIGN because future has completed 2024-11-14T06:48:29,740 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure f1a89a5774b34afce0f3d8eb00754b02, server=20680646cf8a,39097,1731566886006}] 2024-11-14T06:48:29,898 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731566909273.6cf8faecc28e15c306ac8f738165ab4b. 
2024-11-14T06:48:29,898 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 6cf8faecc28e15c306ac8f738165ab4b, NAME => 'TestLogRolling-testLogRolling,,1731566909273.6cf8faecc28e15c306ac8f738165ab4b.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-14T06:48:29,899 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 6cf8faecc28e15c306ac8f738165ab4b 2024-11-14T06:48:29,899 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731566909273.6cf8faecc28e15c306ac8f738165ab4b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:48:29,899 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 6cf8faecc28e15c306ac8f738165ab4b 2024-11-14T06:48:29,899 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 6cf8faecc28e15c306ac8f738165ab4b 2024-11-14T06:48:29,902 INFO [StoreOpener-6cf8faecc28e15c306ac8f738165ab4b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 6cf8faecc28e15c306ac8f738165ab4b 2024-11-14T06:48:29,904 INFO [StoreOpener-6cf8faecc28e15c306ac8f738165ab4b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6cf8faecc28e15c306ac8f738165ab4b columnFamilyName info 2024-11-14T06:48:29,904 DEBUG [StoreOpener-6cf8faecc28e15c306ac8f738165ab4b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:29,922 DEBUG [StoreOpener-6cf8faecc28e15c306ac8f738165ab4b-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/6cf8faecc28e15c306ac8f738165ab4b/info/34e9b51ef2194bb78ee2ae209ab3a4ad.aa84aa8e79ae0ab4ee31e90c0e08f200->hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/34e9b51ef2194bb78ee2ae209ab3a4ad-bottom 2024-11-14T06:48:29,923 INFO [StoreOpener-6cf8faecc28e15c306ac8f738165ab4b-1 {}] regionserver.HStore(327): Store=6cf8faecc28e15c306ac8f738165ab4b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:48:29,923 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 6cf8faecc28e15c306ac8f738165ab4b 2024-11-14T06:48:29,924 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/6cf8faecc28e15c306ac8f738165ab4b 2024-11-14T06:48:29,925 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/6cf8faecc28e15c306ac8f738165ab4b 2024-11-14T06:48:29,925 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 6cf8faecc28e15c306ac8f738165ab4b 2024-11-14T06:48:29,925 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 6cf8faecc28e15c306ac8f738165ab4b 2024-11-14T06:48:29,927 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 6cf8faecc28e15c306ac8f738165ab4b 2024-11-14T06:48:29,928 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 6cf8faecc28e15c306ac8f738165ab4b; next sequenceid=89; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=855372, jitterRate=0.08766216039657593}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T06:48:29,928 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6cf8faecc28e15c306ac8f738165ab4b 2024-11-14T06:48:29,928 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 6cf8faecc28e15c306ac8f738165ab4b: Running coprocessor pre-open hook at 1731566909899Writing region info on filesystem at 1731566909899Initializing all the Stores at 1731566909901 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566909901Cleaning up temporary data from old regions at 1731566909925 (+24 ms)Running coprocessor post-open hooks at 1731566909928 (+3 ms)Region opened successfully at 1731566909928 2024-11-14T06:48:29,929 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731566909273.6cf8faecc28e15c306ac8f738165ab4b., pid=12, masterSystemTime=1731566909891 2024-11-14T06:48:29,929 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 
6cf8faecc28e15c306ac8f738165ab4b:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T06:48:29,929 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-14T06:48:29,929 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:48:29,930 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731566909273.6cf8faecc28e15c306ac8f738165ab4b. 2024-11-14T06:48:29,930 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1541): 6cf8faecc28e15c306ac8f738165ab4b/info is initiating minor compaction (all files) 2024-11-14T06:48:29,930 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6cf8faecc28e15c306ac8f738165ab4b/info in TestLogRolling-testLogRolling,,1731566909273.6cf8faecc28e15c306ac8f738165ab4b. 2024-11-14T06:48:29,930 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/6cf8faecc28e15c306ac8f738165ab4b/info/34e9b51ef2194bb78ee2ae209ab3a4ad.aa84aa8e79ae0ab4ee31e90c0e08f200->hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/34e9b51ef2194bb78ee2ae209ab3a4ad-bottom] into tmpdir=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/6cf8faecc28e15c306ac8f738165ab4b/.tmp, totalSize=71.5 K 2024-11-14T06:48:29,931 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting 34e9b51ef2194bb78ee2ae209ab3a4ad.aa84aa8e79ae0ab4ee31e90c0e08f200, keycount=31, bloomtype=ROW, size=71.5 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1731566896981 2024-11-14T06:48:29,932 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731566909273.6cf8faecc28e15c306ac8f738165ab4b. 2024-11-14T06:48:29,932 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731566909273.6cf8faecc28e15c306ac8f738165ab4b. 2024-11-14T06:48:29,932 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02. 
2024-11-14T06:48:29,932 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => f1a89a5774b34afce0f3d8eb00754b02, NAME => 'TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-14T06:48:29,932 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:29,932 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:48:29,932 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:29,932 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:29,933 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=6cf8faecc28e15c306ac8f738165ab4b, regionState=OPEN, openSeqNum=89, regionLocation=20680646cf8a,39097,1731566886006 2024-11-14T06:48:29,934 INFO [StoreOpener-f1a89a5774b34afce0f3d8eb00754b02-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:29,934 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39097 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-14T06:48:29,935 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-11-14T06:48:29,935 INFO [StoreOpener-f1a89a5774b34afce0f3d8eb00754b02-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f1a89a5774b34afce0f3d8eb00754b02 columnFamilyName info 2024-11-14T06:48:29,935 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-14T06:48:29,935 DEBUG [StoreOpener-f1a89a5774b34afce0f3d8eb00754b02-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:29,935 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6cf8faecc28e15c306ac8f738165ab4b, server=20680646cf8a,39097,1731566886006 because future has completed 2024-11-14T06:48:29,939 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-14T06:48:29,939 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 6cf8faecc28e15c306ac8f738165ab4b, server=20680646cf8a,39097,1731566886006 in 197 msec 2024-11-14T06:48:29,941 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6cf8faecc28e15c306ac8f738165ab4b, ASSIGN in 356 msec 2024-11-14T06:48:29,943 DEBUG [StoreOpener-f1a89a5774b34afce0f3d8eb00754b02-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/34e9b51ef2194bb78ee2ae209ab3a4ad.aa84aa8e79ae0ab4ee31e90c0e08f200->hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/34e9b51ef2194bb78ee2ae209ab3a4ad-top 2024-11-14T06:48:29,948 DEBUG [StoreOpener-f1a89a5774b34afce0f3d8eb00754b02-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/TestLogRolling-testLogRolling=aa84aa8e79ae0ab4ee31e90c0e08f200-619eed19b1b54a86a469b7fc44cededb 2024-11-14T06:48:29,948 INFO [StoreOpener-f1a89a5774b34afce0f3d8eb00754b02-1 {}] regionserver.HStore(327): Store=f1a89a5774b34afce0f3d8eb00754b02/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:48:29,949 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 
f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:29,949 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:29,950 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/1588230740/.tmp/info/e7558612fec14bde9fae524b7fd354c8 is 193, key is TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02./info:regioninfo/1731566909736/Put/seqid=0 2024-11-14T06:48:29,951 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:29,952 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:29,952 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:29,952 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6cf8faecc28e15c306ac8f738165ab4b#info#compaction#65 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T06:48:29,953 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/6cf8faecc28e15c306ac8f738165ab4b/.tmp/info/75630419e75b4ec09a3f2d229e8f96ee is 1080, key is row0001/info:/1731566896981/Put/seqid=0 2024-11-14T06:48:29,954 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:29,955 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened f1a89a5774b34afce0f3d8eb00754b02; next sequenceid=89; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=867038, jitterRate=0.10249689221382141}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T06:48:29,955 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:29,955 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for f1a89a5774b34afce0f3d8eb00754b02: Running coprocessor pre-open hook at 1731566909932Writing region info on filesystem at 1731566909932Initializing all the Stores at 1731566909933 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566909933Cleaning up temporary data from old regions at 1731566909952 (+19 ms)Running coprocessor post-open hooks at 1731566909955 (+3 ms)Region opened successfully at 1731566909955 2024-11-14T06:48:29,956 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02., pid=13, masterSystemTime=1731566909891 2024-11-14T06:48:29,956 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store f1a89a5774b34afce0f3d8eb00754b02:info, priority=-2147483648, current under compaction store size is 2 2024-11-14T06:48:29,956 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:48:29,956 DEBUG [RS:0;20680646cf8a:39097-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-14T06:48:29,957 INFO [RS:0;20680646cf8a:39097-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02. 
2024-11-14T06:48:29,957 DEBUG [RS:0;20680646cf8a:39097-longCompactions-0 {}] regionserver.HStore(1541): f1a89a5774b34afce0f3d8eb00754b02/info is initiating minor compaction (all files) 2024-11-14T06:48:29,958 INFO [RS:0;20680646cf8a:39097-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f1a89a5774b34afce0f3d8eb00754b02/info in TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02. 2024-11-14T06:48:29,958 INFO [RS:0;20680646cf8a:39097-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/34e9b51ef2194bb78ee2ae209ab3a4ad.aa84aa8e79ae0ab4ee31e90c0e08f200->hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/34e9b51ef2194bb78ee2ae209ab3a4ad-top, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/TestLogRolling-testLogRolling=aa84aa8e79ae0ab4ee31e90c0e08f200-619eed19b1b54a86a469b7fc44cededb] into tmpdir=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp, totalSize=77.4 K 2024-11-14T06:48:29,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741849_1025 (size=9847) 2024-11-14T06:48:29,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741849_1025 (size=9847) 2024-11-14T06:48:29,959 DEBUG [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02. 2024-11-14T06:48:29,959 DEBUG [RS:0;20680646cf8a:39097-longCompactions-0 {}] compactions.Compactor(225): Compacting 34e9b51ef2194bb78ee2ae209ab3a4ad.aa84aa8e79ae0ab4ee31e90c0e08f200, keycount=31, bloomtype=ROW, size=71.5 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1731566896981 2024-11-14T06:48:29,959 INFO [RS_OPEN_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02. 
2024-11-14T06:48:29,959 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/1588230740/.tmp/info/e7558612fec14bde9fae524b7fd354c8 2024-11-14T06:48:29,959 DEBUG [RS:0;20680646cf8a:39097-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=aa84aa8e79ae0ab4ee31e90c0e08f200-619eed19b1b54a86a469b7fc44cededb, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1731566909210 2024-11-14T06:48:29,960 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=f1a89a5774b34afce0f3d8eb00754b02, regionState=OPEN, openSeqNum=89, regionLocation=20680646cf8a,39097,1731566886006 2024-11-14T06:48:29,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741850_1026 (size=70862) 2024-11-14T06:48:29,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741850_1026 (size=70862) 2024-11-14T06:48:29,963 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure f1a89a5774b34afce0f3d8eb00754b02, server=20680646cf8a,39097,1731566886006 because future has completed 2024-11-14T06:48:29,968 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-14T06:48:29,968 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure f1a89a5774b34afce0f3d8eb00754b02, server=20680646cf8a,39097,1731566886006 in 224 msec 2024-11-14T06:48:29,971 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-14T06:48:29,971 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f1a89a5774b34afce0f3d8eb00754b02, ASSIGN in 385 msec 2024-11-14T06:48:29,977 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=aa84aa8e79ae0ab4ee31e90c0e08f200, daughterA=6cf8faecc28e15c306ac8f738165ab4b, daughterB=f1a89a5774b34afce0f3d8eb00754b02 in 698 msec 2024-11-14T06:48:29,977 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/6cf8faecc28e15c306ac8f738165ab4b/.tmp/info/75630419e75b4ec09a3f2d229e8f96ee as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/6cf8faecc28e15c306ac8f738165ab4b/info/75630419e75b4ec09a3f2d229e8f96ee 2024-11-14T06:48:29,984 INFO [RS:0;20680646cf8a:39097-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f1a89a5774b34afce0f3d8eb00754b02#info#compaction#66 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T06:48:29,984 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 6cf8faecc28e15c306ac8f738165ab4b/info of 6cf8faecc28e15c306ac8f738165ab4b into 75630419e75b4ec09a3f2d229e8f96ee(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T06:48:29,984 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6cf8faecc28e15c306ac8f738165ab4b: 2024-11-14T06:48:29,984 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731566909273.6cf8faecc28e15c306ac8f738165ab4b., storeName=6cf8faecc28e15c306ac8f738165ab4b/info, priority=15, startTime=1731566909929; duration=0sec 2024-11-14T06:48:29,984 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:48:29,984 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6cf8faecc28e15c306ac8f738165ab4b:info 2024-11-14T06:48:29,984 DEBUG [RS:0;20680646cf8a:39097-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/a45065c280694007a1ab6006b02cf117 is 1080, key is row0062/info:/1731566899118/Put/seqid=0 2024-11-14T06:48:29,984 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/1588230740/.tmp/ns/042f57e2ecaa42c4a6fad3bd20a8f7d7 is 43, key is default/ns:d/1731566886814/Put/seqid=0 2024-11-14T06:48:29,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741851_1027 (size=8359) 2024-11-14T06:48:29,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741851_1027 (size=8359) 2024-11-14T06:48:29,994 DEBUG [RS:0;20680646cf8a:39097-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/a45065c280694007a1ab6006b02cf117 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/a45065c280694007a1ab6006b02cf117 2024-11-14T06:48:29,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741852_1028 (size=5153) 2024-11-14T06:48:29,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741852_1028 (size=5153) 2024-11-14T06:48:29,995 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), 
to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/1588230740/.tmp/ns/042f57e2ecaa42c4a6fad3bd20a8f7d7 2024-11-14T06:48:30,000 INFO [RS:0;20680646cf8a:39097-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 2 (all) file(s) in f1a89a5774b34afce0f3d8eb00754b02/info of f1a89a5774b34afce0f3d8eb00754b02 into a45065c280694007a1ab6006b02cf117(size=8.2 K), total size for store is 8.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T06:48:30,000 DEBUG [RS:0;20680646cf8a:39097-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f1a89a5774b34afce0f3d8eb00754b02: 2024-11-14T06:48:30,000 INFO [RS:0;20680646cf8a:39097-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02., storeName=f1a89a5774b34afce0f3d8eb00754b02/info, priority=14, startTime=1731566909956; duration=0sec 2024-11-14T06:48:30,000 DEBUG [RS:0;20680646cf8a:39097-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:48:30,000 DEBUG [RS:0;20680646cf8a:39097-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f1a89a5774b34afce0f3d8eb00754b02:info 2024-11-14T06:48:30,012 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/1588230740/.tmp/table/77fc804fccac465e9fd71dea6f7cfaa9 is 65, key is TestLogRolling-testLogRolling/table:state/1731566887317/Put/seqid=0 2024-11-14T06:48:30,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741853_1029 (size=5340) 2024-11-14T06:48:30,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741853_1029 (size=5340) 2024-11-14T06:48:30,017 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/1588230740/.tmp/table/77fc804fccac465e9fd71dea6f7cfaa9 2024-11-14T06:48:30,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/1588230740/.tmp/info/e7558612fec14bde9fae524b7fd354c8 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/1588230740/info/e7558612fec14bde9fae524b7fd354c8 2024-11-14T06:48:30,028 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/1588230740/info/e7558612fec14bde9fae524b7fd354c8, entries=30, sequenceid=17, filesize=9.6 K 2024-11-14T06:48:30,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/1588230740/.tmp/ns/042f57e2ecaa42c4a6fad3bd20a8f7d7 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/1588230740/ns/042f57e2ecaa42c4a6fad3bd20a8f7d7 
2024-11-14T06:48:30,034 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/1588230740/ns/042f57e2ecaa42c4a6fad3bd20a8f7d7, entries=2, sequenceid=17, filesize=5.0 K 2024-11-14T06:48:30,035 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/1588230740/.tmp/table/77fc804fccac465e9fd71dea6f7cfaa9 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/1588230740/table/77fc804fccac465e9fd71dea6f7cfaa9 2024-11-14T06:48:30,039 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/1588230740/table/77fc804fccac465e9fd71dea6f7cfaa9, entries=2, sequenceid=17, filesize=5.2 K 2024-11-14T06:48:30,040 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 105ms, sequenceid=17, compaction requested=false 2024-11-14T06:48:30,040 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-14T06:48:30,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:30,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:31,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:52444 deadline: 1731566921214, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200. is not online on 20680646cf8a,39097,1731566886006 2024-11-14T06:48:31,217 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200., hostname=20680646cf8a,39097,1731566886006, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200., hostname=20680646cf8a,39097,1731566886006, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200. 
is not online on 20680646cf8a,39097,1731566886006 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T06:48:31,217 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200., hostname=20680646cf8a,39097,1731566886006, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200. is not online on 20680646cf8a,39097,1731566886006 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T06:48:31,217 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731566886942.aa84aa8e79ae0ab4ee31e90c0e08f200., hostname=20680646cf8a,39097,1731566886006, seqNum=2 from cache 2024-11-14T06:48:31,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:31,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:48:32,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:32,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:33,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:48:33,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:34,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:34,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:34,489 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:34,489 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:34,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:34,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:34,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:34,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:34,491 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:34,491 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:34,527 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:34,528 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:34,528 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:34,528 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:34,528 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:34,529 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:34,534 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:34,535 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:34,535 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:34,539 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:35,050 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T06:48:35,051 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:35,051 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:35,051 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:35,051 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:35,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:35,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:35,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:35,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:35,078 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:35,078 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:35,078 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:35,079 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:35,079 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:35,079 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:35,083 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:35,083 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:35,083 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:35,087 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T06:48:35,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:35,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:35,937 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T06:48:36,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:36,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:37,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:37,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:38,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:38,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:39,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:39,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:40,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:40,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:41,273 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0065', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02., hostname=20680646cf8a,39097,1731566886006, seqNum=89] 2024-11-14T06:48:41,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(8855): Flush requested on f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:41,285 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f1a89a5774b34afce0f3d8eb00754b02 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T06:48:41,289 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/6c35ad91950c4890b209fa4ac0fb1151 is 1080, key is row0065/info:/1731566921274/Put/seqid=0 2024-11-14T06:48:41,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741854_1030 (size=12509) 2024-11-14T06:48:41,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741854_1030 (size=12509) 2024-11-14T06:48:41,294 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/6c35ad91950c4890b209fa4ac0fb1151 2024-11-14T06:48:41,299 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/6c35ad91950c4890b209fa4ac0fb1151 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/6c35ad91950c4890b209fa4ac0fb1151 2024-11-14T06:48:41,305 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/6c35ad91950c4890b209fa4ac0fb1151, entries=7, sequenceid=99, filesize=12.2 K 2024-11-14T06:48:41,306 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for f1a89a5774b34afce0f3d8eb00754b02 in 21ms, sequenceid=99, compaction requested=false 2024-11-14T06:48:41,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f1a89a5774b34afce0f3d8eb00754b02: 
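The flush sequence above follows the usual write-then-commit shape: the memstore contents are written to a new HFile under the region's .tmp directory, and only after the write completes is the file moved into the column family directory (the "Committing ... as ..." line). The Java sketch below illustrates that pattern using only the public Hadoop FileSystem API; the class name and the assumption that a plain rename is the whole commit step are illustrative, not the actual HRegionFileSystem implementation.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FlushCommitSketch {
    // Conceptual sketch: flush output is written under .tmp first, then renamed
    // into the store directory so readers never observe a half-written HFile.
    static Path commitFlushedFile(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
        Path committed = new Path(storeDir, tmpFile.getName());
        // A rename within a single HDFS namespace is effectively atomic, which is
        // what makes the temp-then-rename commit safe for readers.
        if (!fs.rename(tmpFile, committed)) {
            throw new IOException("Failed to commit " + tmpFile + " as " + committed);
        }
        return committed;
    }
}

Committing by rename is why the log reports both the .tmp path and the final info/ path for the same file name (6c35ad91950c4890b209fa4ac0fb1151).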
2024-11-14T06:48:41,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(8855): Flush requested on f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:41,308 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f1a89a5774b34afce0f3d8eb00754b02 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-14T06:48:41,313 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/d9cfc9dbbeab424b8aff63409db7eaa1 is 1080, key is row0072/info:/1731566921286/Put/seqid=0 2024-11-14T06:48:41,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741855_1031 (size=17894) 2024-11-14T06:48:41,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741855_1031 (size=17894) 2024-11-14T06:48:41,322 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/d9cfc9dbbeab424b8aff63409db7eaa1 2024-11-14T06:48:41,327 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/d9cfc9dbbeab424b8aff63409db7eaa1 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/d9cfc9dbbeab424b8aff63409db7eaa1 2024-11-14T06:48:41,333 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/d9cfc9dbbeab424b8aff63409db7eaa1, entries=12, sequenceid=114, filesize=17.5 K 2024-11-14T06:48:41,334 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=12.61 KB/12912 for f1a89a5774b34afce0f3d8eb00754b02 in 26ms, sequenceid=114, compaction requested=true 2024-11-14T06:48:41,334 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f1a89a5774b34afce0f3d8eb00754b02: 2024-11-14T06:48:41,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f1a89a5774b34afce0f3d8eb00754b02:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T06:48:41,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:48:41,334 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T06:48:41,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(8855): Flush requested on f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:41,335 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing f1a89a5774b34afce0f3d8eb00754b02 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-14T06:48:41,336 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T06:48:41,336 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1541): f1a89a5774b34afce0f3d8eb00754b02/info is initiating minor compaction (all files) 2024-11-14T06:48:41,336 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f1a89a5774b34afce0f3d8eb00754b02/info in TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02. 2024-11-14T06:48:41,336 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/a45065c280694007a1ab6006b02cf117, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/6c35ad91950c4890b209fa4ac0fb1151, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/d9cfc9dbbeab424b8aff63409db7eaa1] into tmpdir=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp, totalSize=37.9 K 2024-11-14T06:48:41,336 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting a45065c280694007a1ab6006b02cf117, keycount=3, bloomtype=ROW, size=8.2 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1731566899118 2024-11-14T06:48:41,336 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6c35ad91950c4890b209fa4ac0fb1151, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1731566921274 2024-11-14T06:48:41,337 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting d9cfc9dbbeab424b8aff63409db7eaa1, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1731566921286 2024-11-14T06:48:41,338 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/f08d14fd307a4c498f1ba8b00d7207fa is 1080, key is row0084/info:/1731566921309/Put/seqid=0 2024-11-14T06:48:41,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741856_1032 (size=18988) 2024-11-14T06:48:41,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741856_1032 (size=18988) 2024-11-14T06:48:41,343 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=130 (bloomFilter=true), 
to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/f08d14fd307a4c498f1ba8b00d7207fa 2024-11-14T06:48:41,348 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f1a89a5774b34afce0f3d8eb00754b02#info#compaction#72 average throughput is 22.58 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T06:48:41,348 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/a07b4498e4d64c97b0bd1e49e2cb9a95 is 1080, key is row0062/info:/1731566899118/Put/seqid=0 2024-11-14T06:48:41,349 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/f08d14fd307a4c498f1ba8b00d7207fa as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/f08d14fd307a4c498f1ba8b00d7207fa 2024-11-14T06:48:41,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741857_1033 (size=28952) 2024-11-14T06:48:41,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741857_1033 (size=28952) 2024-11-14T06:48:41,355 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/f08d14fd307a4c498f1ba8b00d7207fa, entries=13, sequenceid=130, filesize=18.5 K 2024-11-14T06:48:41,357 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=0 B/0 for f1a89a5774b34afce0f3d8eb00754b02 in 21ms, sequenceid=130, compaction requested=false 2024-11-14T06:48:41,357 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f1a89a5774b34afce0f3d8eb00754b02: 2024-11-14T06:48:41,359 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/a07b4498e4d64c97b0bd1e49e2cb9a95 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/a07b4498e4d64c97b0bd1e49e2cb9a95 2024-11-14T06:48:41,364 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f1a89a5774b34afce0f3d8eb00754b02/info of f1a89a5774b34afce0f3d8eb00754b02 into a07b4498e4d64c97b0bd1e49e2cb9a95(size=28.3 K), total size for store is 46.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
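The compaction above rewrites three flushed HFiles (8.2 K, 12.2 K and 17.5 K) into a single 28.3 K file. At its core a minor compaction is a k-way merge of already-sorted inputs into one sorted output; the sketch below shows only that merge shape, using a priority queue over plain string iterators, and is not the HBase Compactor or store scanner code.

import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;

public class KWayMergeSketch {
    // Each input iterator is assumed to be sorted; the heap always yields the
    // globally smallest remaining key, producing one sorted output stream.
    static List<String> merge(List<Iterator<String>> inputs) {
        PriorityQueue<Map.Entry<String, Iterator<String>>> heap =
            new PriorityQueue<>(Map.Entry.<String, Iterator<String>>comparingByKey());
        for (Iterator<String> it : inputs) {
            if (it.hasNext()) {
                heap.add(new AbstractMap.SimpleEntry<>(it.next(), it));
            }
        }
        List<String> merged = new ArrayList<>();
        while (!heap.isEmpty()) {
            Map.Entry<String, Iterator<String>> smallest = heap.poll();
            merged.add(smallest.getKey());
            Iterator<String> source = smallest.getValue();
            if (source.hasNext()) {
                heap.add(new AbstractMap.SimpleEntry<>(source.next(), source));
            }
        }
        return merged;
    }
}

Because the merge reads every input cell once and writes it once, the work roughly scales with the total size being compacted (37.9 K here), which is why the throughput controller line above reports an average MB/second figure for the rewrite.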
2024-11-14T06:48:41,364 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f1a89a5774b34afce0f3d8eb00754b02: 2024-11-14T06:48:41,364 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02., storeName=f1a89a5774b34afce0f3d8eb00754b02/info, priority=13, startTime=1731566921334; duration=0sec 2024-11-14T06:48:41,364 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:48:41,364 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f1a89a5774b34afce0f3d8eb00754b02:info 2024-11-14T06:48:41,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:48:41,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:42,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:42,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:48:43,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(8855): Flush requested on f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:43,356 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f1a89a5774b34afce0f3d8eb00754b02 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T06:48:43,360 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/826518e1ab1e41aa8fd7b8408cab26d3 is 1080, key is row0097/info:/1731566923338/Put/seqid=0 2024-11-14T06:48:43,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741858_1034 (size=12516) 2024-11-14T06:48:43,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741858_1034 (size=12516) 2024-11-14T06:48:43,366 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/826518e1ab1e41aa8fd7b8408cab26d3 2024-11-14T06:48:43,373 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/826518e1ab1e41aa8fd7b8408cab26d3 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/826518e1ab1e41aa8fd7b8408cab26d3 2024-11-14T06:48:43,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:43,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-14T06:48:43,380 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/826518e1ab1e41aa8fd7b8408cab26d3, entries=7, sequenceid=141, filesize=12.2 K
2024-11-14T06:48:43,380 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for f1a89a5774b34afce0f3d8eb00754b02 in 24ms, sequenceid=141, compaction requested=true
2024-11-14T06:48:43,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f1a89a5774b34afce0f3d8eb00754b02:
2024-11-14T06:48:43,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f1a89a5774b34afce0f3d8eb00754b02:info, priority=-2147483648, current under compaction store size is 1
2024-11-14T06:48:43,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-14T06:48:43,381 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-14T06:48:43,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(8855): Flush requested on f1a89a5774b34afce0f3d8eb00754b02
2024-11-14T06:48:43,382 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 60456 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-14T06:48:43,382 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f1a89a5774b34afce0f3d8eb00754b02 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB
2024-11-14T06:48:43,382 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1541): f1a89a5774b34afce0f3d8eb00754b02/info is initiating minor compaction (all files)
2024-11-14T06:48:43,382 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f1a89a5774b34afce0f3d8eb00754b02/info in TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.
2024-11-14T06:48:43,382 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/a07b4498e4d64c97b0bd1e49e2cb9a95, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/f08d14fd307a4c498f1ba8b00d7207fa, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/826518e1ab1e41aa8fd7b8408cab26d3] into tmpdir=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp, totalSize=59.0 K
2024-11-14T06:48:43,383 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting a07b4498e4d64c97b0bd1e49e2cb9a95, keycount=22, bloomtype=ROW, size=28.3 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1731566899118
2024-11-14T06:48:43,383 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting f08d14fd307a4c498f1ba8b00d7207fa, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1731566921309
2024-11-14T06:48:43,383 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting 826518e1ab1e41aa8fd7b8408cab26d3, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1731566923338
2024-11-14T06:48:43,386 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/b104b4c2904b4d5e8ffe288251bc7b61 is 1080, key is row0104/info:/1731566923357/Put/seqid=0
2024-11-14T06:48:43,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741859_1035 (size=17906)
2024-11-14T06:48:43,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741859_1035 (size=17906)
2024-11-14T06:48:43,395 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f1a89a5774b34afce0f3d8eb00754b02#info#compaction#75 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-14T06:48:43,395 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/9b5614b4c990427a80376028ac39d41f is 1080, key is row0062/info:/1731566899118/Put/seqid=0
2024-11-14T06:48:43,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741860_1036 (size=50638)
2024-11-14T06:48:43,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741860_1036 (size=50638)
2024-11-14T06:48:43,791 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/b104b4c2904b4d5e8ffe288251bc7b61
2024-11-14T06:48:43,798 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/b104b4c2904b4d5e8ffe288251bc7b61 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/b104b4c2904b4d5e8ffe288251bc7b61
2024-11-14T06:48:43,803 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/b104b4c2904b4d5e8ffe288251bc7b61, entries=12, sequenceid=156, filesize=17.5 K
2024-11-14T06:48:43,804 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=13.66 KB/13988 for f1a89a5774b34afce0f3d8eb00754b02 in 422ms, sequenceid=156, compaction requested=false
2024-11-14T06:48:43,804 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f1a89a5774b34afce0f3d8eb00754b02:
2024-11-14T06:48:43,806 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/9b5614b4c990427a80376028ac39d41f as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/9b5614b4c990427a80376028ac39d41f
2024-11-14T06:48:43,812 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f1a89a5774b34afce0f3d8eb00754b02/info of f1a89a5774b34afce0f3d8eb00754b02 into 9b5614b4c990427a80376028ac39d41f(size=49.5 K), total size for store is 66.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-14T06:48:43,812 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f1a89a5774b34afce0f3d8eb00754b02: 2024-11-14T06:48:43,812 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02., storeName=f1a89a5774b34afce0f3d8eb00754b02/info, priority=13, startTime=1731566923381; duration=0sec 2024-11-14T06:48:43,812 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:48:43,812 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f1a89a5774b34afce0f3d8eb00754b02:info 2024-11-14T06:48:44,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:48:44,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:45,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:45,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-14T06:48:45,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(8855): Flush requested on f1a89a5774b34afce0f3d8eb00754b02
2024-11-14T06:48:45,426 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f1a89a5774b34afce0f3d8eb00754b02 1/1 column families, dataSize=14.71 KB heapSize=16 KB
2024-11-14T06:48:45,430 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/7230f9ff4ad941568dfc2519daf9e918 is 1080, key is row0116/info:/1731566923383/Put/seqid=0
2024-11-14T06:48:45,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741861_1037 (size=20078)
2024-11-14T06:48:45,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741861_1037 (size=20078)
2024-11-14T06:48:45,438 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/7230f9ff4ad941568dfc2519daf9e918
2024-11-14T06:48:45,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/7230f9ff4ad941568dfc2519daf9e918 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/7230f9ff4ad941568dfc2519daf9e918
2024-11-14T06:48:45,455 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/7230f9ff4ad941568dfc2519daf9e918, entries=14, sequenceid=174, filesize=19.6 K
2024-11-14T06:48:45,456 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=10.51 KB/10760 for f1a89a5774b34afce0f3d8eb00754b02 in 30ms, sequenceid=174, compaction requested=true
2024-11-14T06:48:45,456 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f1a89a5774b34afce0f3d8eb00754b02:
2024-11-14T06:48:45,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(8855): Flush requested on f1a89a5774b34afce0f3d8eb00754b02
2024-11-14T06:48:45,456 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f1a89a5774b34afce0f3d8eb00754b02:info, priority=-2147483648, current under compaction store size is 1
2024-11-14T06:48:45,456 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-14T06:48:45,456 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-14T06:48:45,456 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f1a89a5774b34afce0f3d8eb00754b02 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB
2024-11-14T06:48:45,458 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 88622 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-14T06:48:45,458 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1541): f1a89a5774b34afce0f3d8eb00754b02/info is initiating minor compaction (all files)
2024-11-14T06:48:45,458 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f1a89a5774b34afce0f3d8eb00754b02/info in TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.
2024-11-14T06:48:45,458 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/9b5614b4c990427a80376028ac39d41f, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/b104b4c2904b4d5e8ffe288251bc7b61, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/7230f9ff4ad941568dfc2519daf9e918] into tmpdir=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp, totalSize=86.5 K
2024-11-14T06:48:45,458 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9b5614b4c990427a80376028ac39d41f, keycount=42, bloomtype=ROW, size=49.5 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1731566899118
2024-11-14T06:48:45,459 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting b104b4c2904b4d5e8ffe288251bc7b61, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731566923357
2024-11-14T06:48:45,459 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7230f9ff4ad941568dfc2519daf9e918, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1731566923383
2024-11-14T06:48:45,461 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/7dd3aa085ae4432495572695e7acf398 is 1080, key is row0130/info:/1731566925427/Put/seqid=0
2024-11-14T06:48:45,476 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f1a89a5774b34afce0f3d8eb00754b02#info#compaction#78 average throughput is 34.89 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-14T06:48:45,477 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/cfe66d62a1564551b15003dde326833b is 1080, key is row0062/info:/1731566899118/Put/seqid=0
2024-11-14T06:48:45,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741862_1038 (size=16828)
2024-11-14T06:48:45,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741862_1038 (size=16828)
2024-11-14T06:48:45,487 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=188 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/7dd3aa085ae4432495572695e7acf398
2024-11-14T06:48:45,493 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/7dd3aa085ae4432495572695e7acf398 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/7dd3aa085ae4432495572695e7acf398
2024-11-14T06:48:45,499 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/7dd3aa085ae4432495572695e7acf398, entries=11, sequenceid=188, filesize=16.4 K
2024-11-14T06:48:45,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=14.71 KB/15064 for f1a89a5774b34afce0f3d8eb00754b02 in 44ms, sequenceid=188, compaction requested=false
2024-11-14T06:48:45,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f1a89a5774b34afce0f3d8eb00754b02:
2024-11-14T06:48:45,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(8855): Flush requested on f1a89a5774b34afce0f3d8eb00754b02
2024-11-14T06:48:45,502 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f1a89a5774b34afce0f3d8eb00754b02 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB
2024-11-14T06:48:45,506 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/e5be554c7a2f4a22802cfd6c17340b06 is 1080, key is row0141/info:/1731566925458/Put/seqid=0
2024-11-14T06:48:45,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741863_1039 (size=78909)
2024-11-14T06:48:45,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741863_1039 (size=78909)
2024-11-14T06:48:45,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741864_1040 (size=22238)
2024-11-14T06:48:45,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741864_1040 (size=22238)
2024-11-14T06:48:45,513 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/e5be554c7a2f4a22802cfd6c17340b06
2024-11-14T06:48:45,514 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/cfe66d62a1564551b15003dde326833b as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/cfe66d62a1564551b15003dde326833b
2024-11-14T06:48:45,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/e5be554c7a2f4a22802cfd6c17340b06 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/e5be554c7a2f4a22802cfd6c17340b06
2024-11-14T06:48:45,520 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f1a89a5774b34afce0f3d8eb00754b02/info of f1a89a5774b34afce0f3d8eb00754b02 into cfe66d62a1564551b15003dde326833b(size=77.1 K), total size for store is 93.5 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-14T06:48:45,520 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f1a89a5774b34afce0f3d8eb00754b02: 2024-11-14T06:48:45,520 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02., storeName=f1a89a5774b34afce0f3d8eb00754b02/info, priority=13, startTime=1731566925456; duration=0sec 2024-11-14T06:48:45,520 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:48:45,520 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f1a89a5774b34afce0f3d8eb00754b02:info 2024-11-14T06:48:45,525 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/e5be554c7a2f4a22802cfd6c17340b06, entries=16, sequenceid=207, filesize=21.7 K 2024-11-14T06:48:45,525 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=4.20 KB/4304 for f1a89a5774b34afce0f3d8eb00754b02 in 23ms, sequenceid=207, compaction requested=true 2024-11-14T06:48:45,526 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f1a89a5774b34afce0f3d8eb00754b02: 2024-11-14T06:48:45,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f1a89a5774b34afce0f3d8eb00754b02:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T06:48:45,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:48:45,526 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T06:48:45,527 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 117975 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T06:48:45,527 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1541): f1a89a5774b34afce0f3d8eb00754b02/info is initiating minor compaction (all files) 2024-11-14T06:48:45,527 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f1a89a5774b34afce0f3d8eb00754b02/info in TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02. 
2024-11-14T06:48:45,527 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/cfe66d62a1564551b15003dde326833b, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/7dd3aa085ae4432495572695e7acf398, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/e5be554c7a2f4a22802cfd6c17340b06] into tmpdir=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp, totalSize=115.2 K 2024-11-14T06:48:45,528 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting cfe66d62a1564551b15003dde326833b, keycount=68, bloomtype=ROW, size=77.1 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1731566899118 2024-11-14T06:48:45,528 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7dd3aa085ae4432495572695e7acf398, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=188, earliestPutTs=1731566925427 2024-11-14T06:48:45,528 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting e5be554c7a2f4a22802cfd6c17340b06, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1731566925458 2024-11-14T06:48:45,540 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f1a89a5774b34afce0f3d8eb00754b02#info#compaction#80 average throughput is 32.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T06:48:45,540 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/4f28c44821d14776bd6a75f1bc75c3c1 is 1080, key is row0062/info:/1731566899118/Put/seqid=0 2024-11-14T06:48:45,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741865_1041 (size=108125) 2024-11-14T06:48:45,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741865_1041 (size=108125) 2024-11-14T06:48:45,551 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/4f28c44821d14776bd6a75f1bc75c3c1 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/4f28c44821d14776bd6a75f1bc75c3c1 2024-11-14T06:48:45,558 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f1a89a5774b34afce0f3d8eb00754b02/info of f1a89a5774b34afce0f3d8eb00754b02 into 4f28c44821d14776bd6a75f1bc75c3c1(size=105.6 K), total size for store is 105.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T06:48:45,559 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f1a89a5774b34afce0f3d8eb00754b02: 2024-11-14T06:48:45,559 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02., storeName=f1a89a5774b34afce0f3d8eb00754b02/info, priority=13, startTime=1731566925526; duration=0sec 2024-11-14T06:48:45,559 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:48:45,559 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f1a89a5774b34afce0f3d8eb00754b02:info 2024-11-14T06:48:46,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:46,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:47,007 INFO [master/20680646cf8a:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-14T06:48:47,007 INFO [master/20680646cf8a:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-14T06:48:47,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:47,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-14T06:48:47,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(8855): Flush requested on f1a89a5774b34afce0f3d8eb00754b02
2024-11-14T06:48:47,518 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f1a89a5774b34afce0f3d8eb00754b02 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-14T06:48:47,522 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/ee13fd64ddb24a26a8432ad6096ccd1f is 1080, key is row0157/info:/1731566925504/Put/seqid=0
2024-11-14T06:48:47,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741866_1042 (size=12516)
2024-11-14T06:48:47,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741866_1042 (size=12516)
2024-11-14T06:48:47,547 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/ee13fd64ddb24a26a8432ad6096ccd1f
2024-11-14T06:48:47,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/ee13fd64ddb24a26a8432ad6096ccd1f as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/ee13fd64ddb24a26a8432ad6096ccd1f
2024-11-14T06:48:47,557 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/ee13fd64ddb24a26a8432ad6096ccd1f, entries=7, sequenceid=219, filesize=12.2 K
2024-11-14T06:48:47,558 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=16.81 KB/17216 for f1a89a5774b34afce0f3d8eb00754b02 in 40ms, sequenceid=219, compaction requested=false
2024-11-14T06:48:47,558 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f1a89a5774b34afce0f3d8eb00754b02:
2024-11-14T06:48:47,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(8855): Flush requested on f1a89a5774b34afce0f3d8eb00754b02
2024-11-14T06:48:47,559 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f1a89a5774b34afce0f3d8eb00754b02 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB
2024-11-14T06:48:47,563 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/39fc539a27194e8ba070f62a65c2ddda is 1080, key is row0164/info:/1731566927520/Put/seqid=0
2024-11-14T06:48:47,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741867_1043 (size=23316)
2024-11-14T06:48:47,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741867_1043 (size=23316)
2024-11-14T06:48:47,579 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/39fc539a27194e8ba070f62a65c2ddda
2024-11-14T06:48:47,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/39fc539a27194e8ba070f62a65c2ddda as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/39fc539a27194e8ba070f62a65c2ddda
2024-11-14T06:48:47,590 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/39fc539a27194e8ba070f62a65c2ddda, entries=17, sequenceid=239, filesize=22.8 K
2024-11-14T06:48:47,591 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=12.61 KB/12912 for f1a89a5774b34afce0f3d8eb00754b02 in 32ms, sequenceid=239, compaction requested=true
2024-11-14T06:48:47,591 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f1a89a5774b34afce0f3d8eb00754b02:
2024-11-14T06:48:47,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f1a89a5774b34afce0f3d8eb00754b02:info, priority=-2147483648, current under compaction store size is 1
2024-11-14T06:48:47,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-14T06:48:47,591 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-14T06:48:47,592 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 143957 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-14T06:48:47,592 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1541): f1a89a5774b34afce0f3d8eb00754b02/info is initiating minor compaction (all files)
2024-11-14T06:48:47,592 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f1a89a5774b34afce0f3d8eb00754b02/info in TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.
2024-11-14T06:48:47,592 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/4f28c44821d14776bd6a75f1bc75c3c1, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/ee13fd64ddb24a26a8432ad6096ccd1f, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/39fc539a27194e8ba070f62a65c2ddda] into tmpdir=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp, totalSize=140.6 K 2024-11-14T06:48:47,593 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4f28c44821d14776bd6a75f1bc75c3c1, keycount=95, bloomtype=ROW, size=105.6 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1731566899118 2024-11-14T06:48:47,593 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting ee13fd64ddb24a26a8432ad6096ccd1f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1731566925504 2024-11-14T06:48:47,593 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting 39fc539a27194e8ba070f62a65c2ddda, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1731566927520 2024-11-14T06:48:47,607 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f1a89a5774b34afce0f3d8eb00754b02#info#compaction#83 average throughput is 40.70 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T06:48:47,608 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/146e5e291518436b8644624ae5fbb4c0 is 1080, key is row0062/info:/1731566899118/Put/seqid=0 2024-11-14T06:48:47,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741868_1044 (size=134231) 2024-11-14T06:48:47,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741868_1044 (size=134231) 2024-11-14T06:48:47,620 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/146e5e291518436b8644624ae5fbb4c0 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/146e5e291518436b8644624ae5fbb4c0 2024-11-14T06:48:47,628 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f1a89a5774b34afce0f3d8eb00754b02/info of f1a89a5774b34afce0f3d8eb00754b02 into 146e5e291518436b8644624ae5fbb4c0(size=131.1 K), total size for store is 131.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T06:48:47,628 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f1a89a5774b34afce0f3d8eb00754b02: 2024-11-14T06:48:47,628 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02., storeName=f1a89a5774b34afce0f3d8eb00754b02/info, priority=13, startTime=1731566927591; duration=0sec 2024-11-14T06:48:47,628 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:48:47,628 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f1a89a5774b34afce0f3d8eb00754b02:info 2024-11-14T06:48:48,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:48,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:49,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:49,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:49,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(8855): Flush requested on f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:49,590 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f1a89a5774b34afce0f3d8eb00754b02 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-14T06:48:49,595 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/65f899fac698449d8802844382165caf is 1080, key is row0181/info:/1731566927560/Put/seqid=0 2024-11-14T06:48:49,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741869_1045 (size=19000) 2024-11-14T06:48:49,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741869_1045 (size=19000) 2024-11-14T06:48:49,603 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/65f899fac698449d8802844382165caf 2024-11-14T06:48:49,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/65f899fac698449d8802844382165caf as 
hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/65f899fac698449d8802844382165caf 2024-11-14T06:48:49,614 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/65f899fac698449d8802844382165caf, entries=13, sequenceid=256, filesize=18.6 K 2024-11-14T06:48:49,615 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=9.46 KB/9684 for f1a89a5774b34afce0f3d8eb00754b02 in 25ms, sequenceid=256, compaction requested=false 2024-11-14T06:48:49,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f1a89a5774b34afce0f3d8eb00754b02: 2024-11-14T06:48:49,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(8855): Flush requested on f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:49,617 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f1a89a5774b34afce0f3d8eb00754b02 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-14T06:48:49,622 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/d09fe7e3ceaa42e8a08133cfc6e4048a is 1080, key is row0194/info:/1731566929591/Put/seqid=0 2024-11-14T06:48:49,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741870_1046 (size=16839) 2024-11-14T06:48:49,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741870_1046 (size=16839) 2024-11-14T06:48:49,631 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/d09fe7e3ceaa42e8a08133cfc6e4048a 2024-11-14T06:48:49,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/d09fe7e3ceaa42e8a08133cfc6e4048a as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/d09fe7e3ceaa42e8a08133cfc6e4048a 2024-11-14T06:48:49,644 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/d09fe7e3ceaa42e8a08133cfc6e4048a, entries=11, sequenceid=270, filesize=16.4 K 2024-11-14T06:48:49,645 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=12.61 KB/12912 for f1a89a5774b34afce0f3d8eb00754b02 in 27ms, sequenceid=270, compaction requested=true 2024-11-14T06:48:49,645 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for f1a89a5774b34afce0f3d8eb00754b02: 2024-11-14T06:48:49,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(8855): Flush requested on f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:49,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f1a89a5774b34afce0f3d8eb00754b02:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T06:48:49,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:48:49,645 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T06:48:49,645 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f1a89a5774b34afce0f3d8eb00754b02 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-14T06:48:49,646 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 170070 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T06:48:49,646 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1541): f1a89a5774b34afce0f3d8eb00754b02/info is initiating minor compaction (all files) 2024-11-14T06:48:49,646 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f1a89a5774b34afce0f3d8eb00754b02/info in TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02. 2024-11-14T06:48:49,646 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/146e5e291518436b8644624ae5fbb4c0, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/65f899fac698449d8802844382165caf, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/d09fe7e3ceaa42e8a08133cfc6e4048a] into tmpdir=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp, totalSize=166.1 K 2024-11-14T06:48:49,647 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting 146e5e291518436b8644624ae5fbb4c0, keycount=119, bloomtype=ROW, size=131.1 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1731566899118 2024-11-14T06:48:49,647 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting 65f899fac698449d8802844382165caf, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1731566927560 2024-11-14T06:48:49,647 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting d09fe7e3ceaa42e8a08133cfc6e4048a, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1731566929591 2024-11-14T06:48:49,649 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/891ef293293240589570c2eafe8338f6 is 1080, key is row0205/info:/1731566929618/Put/seqid=0 2024-11-14T06:48:49,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741871_1047 (size=19013) 2024-11-14T06:48:49,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741871_1047 (size=19013) 2024-11-14T06:48:49,665 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f1a89a5774b34afce0f3d8eb00754b02#info#compaction#87 average throughput is 48.91 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T06:48:49,665 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/5a0572fbb53b4a88ab49306e27b53996 is 1080, key is row0062/info:/1731566899118/Put/seqid=0 2024-11-14T06:48:49,666 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/891ef293293240589570c2eafe8338f6 2024-11-14T06:48:49,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741872_1048 (size=160293) 2024-11-14T06:48:49,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741872_1048 (size=160293) 2024-11-14T06:48:49,672 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/891ef293293240589570c2eafe8338f6 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/891ef293293240589570c2eafe8338f6 2024-11-14T06:48:49,676 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/891ef293293240589570c2eafe8338f6, entries=13, sequenceid=286, filesize=18.6 K 2024-11-14T06:48:49,677 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/5a0572fbb53b4a88ab49306e27b53996 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/5a0572fbb53b4a88ab49306e27b53996 2024-11-14T06:48:49,677 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=7.36 KB/7532 for f1a89a5774b34afce0f3d8eb00754b02 in 32ms, sequenceid=286, compaction requested=false 2024-11-14T06:48:49,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f1a89a5774b34afce0f3d8eb00754b02: 2024-11-14T06:48:49,683 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f1a89a5774b34afce0f3d8eb00754b02/info of f1a89a5774b34afce0f3d8eb00754b02 into 5a0572fbb53b4a88ab49306e27b53996(size=156.5 K), total size for store is 175.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T06:48:49,683 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f1a89a5774b34afce0f3d8eb00754b02: 2024-11-14T06:48:49,683 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02., storeName=f1a89a5774b34afce0f3d8eb00754b02/info, priority=13, startTime=1731566929645; duration=0sec 2024-11-14T06:48:49,683 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:48:49,683 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f1a89a5774b34afce0f3d8eb00754b02:info 2024-11-14T06:48:50,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:50,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:51,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:51,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:51,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(8855): Flush requested on f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:51,662 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f1a89a5774b34afce0f3d8eb00754b02 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-14T06:48:51,666 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/bf6da3278e3b40c1a2a2c9bb7b34b6ed is 1080, key is row0218/info:/1731566929646/Put/seqid=0 2024-11-14T06:48:51,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741873_1049 (size=13602) 2024-11-14T06:48:51,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741873_1049 (size=13602) 2024-11-14T06:48:51,670 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/bf6da3278e3b40c1a2a2c9bb7b34b6ed 2024-11-14T06:48:51,675 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/bf6da3278e3b40c1a2a2c9bb7b34b6ed as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/bf6da3278e3b40c1a2a2c9bb7b34b6ed 2024-11-14T06:48:51,681 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/bf6da3278e3b40c1a2a2c9bb7b34b6ed, entries=8, sequenceid=298, filesize=13.3 K 2024-11-14T06:48:51,682 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=10.51 KB/10760 for f1a89a5774b34afce0f3d8eb00754b02 in 20ms, sequenceid=298, compaction requested=true 2024-11-14T06:48:51,682 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f1a89a5774b34afce0f3d8eb00754b02: 2024-11-14T06:48:51,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f1a89a5774b34afce0f3d8eb00754b02:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T06:48:51,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:48:51,682 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T06:48:51,683 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 192908 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T06:48:51,683 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1541): f1a89a5774b34afce0f3d8eb00754b02/info is initiating minor compaction (all files) 2024-11-14T06:48:51,683 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f1a89a5774b34afce0f3d8eb00754b02/info in TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02. 2024-11-14T06:48:51,683 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/5a0572fbb53b4a88ab49306e27b53996, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/891ef293293240589570c2eafe8338f6, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/bf6da3278e3b40c1a2a2c9bb7b34b6ed] into tmpdir=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp, totalSize=188.4 K 2024-11-14T06:48:51,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(8855): Flush requested on f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:51,683 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f1a89a5774b34afce0f3d8eb00754b02 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-14T06:48:51,684 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5a0572fbb53b4a88ab49306e27b53996, keycount=143, bloomtype=ROW, size=156.5 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1731566899118 2024-11-14T06:48:51,684 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting 891ef293293240589570c2eafe8338f6, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1731566929618 2024-11-14T06:48:51,684 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting bf6da3278e3b40c1a2a2c9bb7b34b6ed, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1731566929646 2024-11-14T06:48:51,687 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/9e2c626bbd594592a318c44543f92e81 is 1080, key is row0226/info:/1731566931663/Put/seqid=0 2024-11-14T06:48:51,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to 
blk_1073741874_1050 (size=17918) 2024-11-14T06:48:51,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741874_1050 (size=17918) 2024-11-14T06:48:51,693 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/9e2c626bbd594592a318c44543f92e81 2024-11-14T06:48:51,697 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f1a89a5774b34afce0f3d8eb00754b02#info#compaction#90 average throughput is 56.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T06:48:51,698 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/ddec1bba54014e7f8b576a65055618f4 is 1080, key is row0062/info:/1731566899118/Put/seqid=0 2024-11-14T06:48:51,699 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/9e2c626bbd594592a318c44543f92e81 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/9e2c626bbd594592a318c44543f92e81 2024-11-14T06:48:51,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741875_1051 (size=183058) 2024-11-14T06:48:51,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741875_1051 (size=183058) 2024-11-14T06:48:51,704 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/9e2c626bbd594592a318c44543f92e81, entries=12, sequenceid=313, filesize=17.5 K 2024-11-14T06:48:51,705 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/ddec1bba54014e7f8b576a65055618f4 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/ddec1bba54014e7f8b576a65055618f4 2024-11-14T06:48:51,705 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for f1a89a5774b34afce0f3d8eb00754b02 in 22ms, sequenceid=313, compaction requested=false 2024-11-14T06:48:51,706 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f1a89a5774b34afce0f3d8eb00754b02: 2024-11-14T06:48:51,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(8855): 
Flush requested on f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:51,707 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f1a89a5774b34afce0f3d8eb00754b02 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-14T06:48:51,711 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/033deb4afd5a45018f68c2c43e1059bc is 1080, key is row0238/info:/1731566931684/Put/seqid=0 2024-11-14T06:48:51,712 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f1a89a5774b34afce0f3d8eb00754b02/info of f1a89a5774b34afce0f3d8eb00754b02 into ddec1bba54014e7f8b576a65055618f4(size=178.8 K), total size for store is 196.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T06:48:51,712 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f1a89a5774b34afce0f3d8eb00754b02: 2024-11-14T06:48:51,712 INFO [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02., storeName=f1a89a5774b34afce0f3d8eb00754b02/info, priority=13, startTime=1731566931682; duration=0sec 2024-11-14T06:48:51,712 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:48:51,712 DEBUG [RS:0;20680646cf8a:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f1a89a5774b34afce0f3d8eb00754b02:info 2024-11-14T06:48:51,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741876_1052 (size=17918) 2024-11-14T06:48:51,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741876_1052 (size=17918) 2024-11-14T06:48:51,719 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/033deb4afd5a45018f68c2c43e1059bc 2024-11-14T06:48:51,724 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/033deb4afd5a45018f68c2c43e1059bc as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/033deb4afd5a45018f68c2c43e1059bc 2024-11-14T06:48:51,729 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/033deb4afd5a45018f68c2c43e1059bc, entries=12, sequenceid=328, filesize=17.5 K 2024-11-14T06:48:51,730 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush 
of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=7.36 KB/7532 for f1a89a5774b34afce0f3d8eb00754b02 in 22ms, sequenceid=328, compaction requested=true 2024-11-14T06:48:51,730 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f1a89a5774b34afce0f3d8eb00754b02: 2024-11-14T06:48:51,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f1a89a5774b34afce0f3d8eb00754b02:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T06:48:51,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:48:51,730 DEBUG [RS:0;20680646cf8a:39097-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T06:48:51,731 DEBUG [RS:0;20680646cf8a:39097-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 218894 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T06:48:51,731 DEBUG [RS:0;20680646cf8a:39097-longCompactions-0 {}] regionserver.HStore(1541): f1a89a5774b34afce0f3d8eb00754b02/info is initiating minor compaction (all files) 2024-11-14T06:48:51,731 INFO [RS:0;20680646cf8a:39097-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f1a89a5774b34afce0f3d8eb00754b02/info in TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02. 2024-11-14T06:48:51,731 INFO [RS:0;20680646cf8a:39097-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/ddec1bba54014e7f8b576a65055618f4, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/9e2c626bbd594592a318c44543f92e81, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/033deb4afd5a45018f68c2c43e1059bc] into tmpdir=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp, totalSize=213.8 K 2024-11-14T06:48:51,732 DEBUG [RS:0;20680646cf8a:39097-longCompactions-0 {}] compactions.Compactor(225): Compacting ddec1bba54014e7f8b576a65055618f4, keycount=164, bloomtype=ROW, size=178.8 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1731566899118 2024-11-14T06:48:51,732 DEBUG [RS:0;20680646cf8a:39097-longCompactions-0 {}] compactions.Compactor(225): Compacting 9e2c626bbd594592a318c44543f92e81, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1731566931663 2024-11-14T06:48:51,732 DEBUG [RS:0;20680646cf8a:39097-longCompactions-0 {}] compactions.Compactor(225): Compacting 033deb4afd5a45018f68c2c43e1059bc, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1731566931684 2024-11-14T06:48:51,743 INFO [RS:0;20680646cf8a:39097-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f1a89a5774b34afce0f3d8eb00754b02#info#compaction#92 average throughput is 64.31 
MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T06:48:51,744 DEBUG [RS:0;20680646cf8a:39097-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/32bdd04235694124ac0cbe08da2b86f4 is 1080, key is row0062/info:/1731566899118/Put/seqid=0 2024-11-14T06:48:51,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741877_1053 (size=209133) 2024-11-14T06:48:51,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741877_1053 (size=209133) 2024-11-14T06:48:51,751 DEBUG [RS:0;20680646cf8a:39097-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/32bdd04235694124ac0cbe08da2b86f4 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/32bdd04235694124ac0cbe08da2b86f4 2024-11-14T06:48:51,757 INFO [RS:0;20680646cf8a:39097-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f1a89a5774b34afce0f3d8eb00754b02/info of f1a89a5774b34afce0f3d8eb00754b02 into 32bdd04235694124ac0cbe08da2b86f4(size=204.2 K), total size for store is 204.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T06:48:51,757 DEBUG [RS:0;20680646cf8a:39097-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f1a89a5774b34afce0f3d8eb00754b02: 2024-11-14T06:48:51,757 INFO [RS:0;20680646cf8a:39097-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02., storeName=f1a89a5774b34afce0f3d8eb00754b02/info, priority=13, startTime=1731566931730; duration=0sec 2024-11-14T06:48:51,758 DEBUG [RS:0;20680646cf8a:39097-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T06:48:51,758 DEBUG [RS:0;20680646cf8a:39097-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f1a89a5774b34afce0f3d8eb00754b02:info 2024-11-14T06:48:51,778 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-14T06:48:52,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:52,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:53,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:53,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:53,721 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-14T06:48:53,721 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C39097%2C1731566886006.1731566933721 2024-11-14T06:48:53,726 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:53,726 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:53,726 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:53,727 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:53,727 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:53,727 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/WALs/20680646cf8a,39097,1731566886006/20680646cf8a%2C39097%2C1731566886006.1731566886397 with entries=316, filesize=309.65 KB; new WAL /user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/WALs/20680646cf8a,39097,1731566886006/20680646cf8a%2C39097%2C1731566886006.1731566933721 2024-11-14T06:48:53,728 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42521:42521),(127.0.0.1/127.0.0.1:39903:39903)] 2024-11-14T06:48:53,728 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/WALs/20680646cf8a,39097,1731566886006/20680646cf8a%2C39097%2C1731566886006.1731566886397 is not closed yet, will try archiving it next time 2024-11-14T06:48:53,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741833_1009 (size=317093) 2024-11-14T06:48:53,729 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741833_1009 (size=317093) 2024-11-14T06:48:53,731 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 6cf8faecc28e15c306ac8f738165ab4b: 2024-11-14T06:48:53,731 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-14T06:48:53,736 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/1588230740/.tmp/info/2a0422cf8c0246b882efaba5afd4a59a is 193, key is TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02./info:regioninfo/1731566909960/Put/seqid=0 2024-11-14T06:48:53,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741879_1055 (size=6223) 2024-11-14T06:48:53,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741879_1055 (size=6223) 2024-11-14T06:48:53,741 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/1588230740/.tmp/info/2a0422cf8c0246b882efaba5afd4a59a 2024-11-14T06:48:53,746 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/1588230740/.tmp/info/2a0422cf8c0246b882efaba5afd4a59a as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/1588230740/info/2a0422cf8c0246b882efaba5afd4a59a 2024-11-14T06:48:53,751 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/1588230740/info/2a0422cf8c0246b882efaba5afd4a59a, entries=5, sequenceid=21, filesize=6.1 K 2024-11-14T06:48:53,752 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 21ms, sequenceid=21, compaction requested=false 2024-11-14T06:48:53,752 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-14T06:48:53,752 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing f1a89a5774b34afce0f3d8eb00754b02 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T06:48:53,755 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/8f7f1ac06c49420e807f4c34d89e39d1 is 1080, key is row0250/info:/1731566931709/Put/seqid=0 2024-11-14T06:48:53,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741880_1056 (size=12523) 2024-11-14T06:48:53,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741880_1056 (size=12523) 2024-11-14T06:48:53,759 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at 
sequenceid=340 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/8f7f1ac06c49420e807f4c34d89e39d1 2024-11-14T06:48:53,764 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/.tmp/info/8f7f1ac06c49420e807f4c34d89e39d1 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/8f7f1ac06c49420e807f4c34d89e39d1 2024-11-14T06:48:53,770 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/8f7f1ac06c49420e807f4c34d89e39d1, entries=7, sequenceid=340, filesize=12.2 K 2024-11-14T06:48:53,771 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for f1a89a5774b34afce0f3d8eb00754b02 in 19ms, sequenceid=340, compaction requested=false 2024-11-14T06:48:53,771 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for f1a89a5774b34afce0f3d8eb00754b02: 2024-11-14T06:48:53,771 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C39097%2C1731566886006.1731566933771 2024-11-14T06:48:53,779 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:53,779 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:53,779 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:53,779 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:53,779 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:53,779 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/WALs/20680646cf8a,39097,1731566886006/20680646cf8a%2C39097%2C1731566886006.1731566933721 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/WALs/20680646cf8a,39097,1731566886006/20680646cf8a%2C39097%2C1731566886006.1731566933771 2024-11-14T06:48:53,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741878_1054 (size=731) 2024-11-14T06:48:53,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741878_1054 (size=731) 2024-11-14T06:48:53,781 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42521:42521),(127.0.0.1/127.0.0.1:39903:39903)] 2024-11-14T06:48:53,781 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/WALs/20680646cf8a,39097,1731566886006/20680646cf8a%2C39097%2C1731566886006.1731566933721 is not closed yet, will try archiving it next time 2024-11-14T06:48:53,784 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/WALs/20680646cf8a,39097,1731566886006/20680646cf8a%2C39097%2C1731566886006.1731566886397 to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/oldWALs/20680646cf8a%2C39097%2C1731566886006.1731566886397 2024-11-14T06:48:53,784 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-14T06:48:53,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T06:48:53,785 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T06:48:53,785 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/WALs/20680646cf8a,39097,1731566886006/20680646cf8a%2C39097%2C1731566886006.1731566933721 to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/oldWALs/20680646cf8a%2C39097%2C1731566886006.1731566933721 2024-11-14T06:48:53,785 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) 
at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T06:48:53,785 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:48:53,785 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:48:53,785 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-14T06:48:53,785 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T06:48:53,785 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1610846666, stopped=false 2024-11-14T06:48:53,785 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=20680646cf8a,32979,1731566885953 2024-11-14T06:48:53,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x1003cfd38a10001, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T06:48:53,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T06:48:53,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x1003cfd38a10001, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:53,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:53,787 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T06:48:53,787 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-14T06:48:53,787 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T06:48:53,787 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:48:53,787 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '20680646cf8a,39097,1731566886006' ***** 2024-11-14T06:48:53,787 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T06:48:53,787 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39097-0x1003cfd38a10001, quorum=127.0.0.1:56956, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:48:53,787 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:48:53,788 INFO [RS:0;20680646cf8a:39097 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T06:48:53,788 INFO [RS:0;20680646cf8a:39097 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T06:48:53,788 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T06:48:53,788 INFO [RS:0;20680646cf8a:39097 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T06:48:53,788 INFO [RS:0;20680646cf8a:39097 {}] regionserver.HRegionServer(3091): Received CLOSE for 6cf8faecc28e15c306ac8f738165ab4b 2024-11-14T06:48:53,788 INFO [RS:0;20680646cf8a:39097 {}] regionserver.HRegionServer(3091): Received CLOSE for f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:53,788 INFO [RS:0;20680646cf8a:39097 {}] regionserver.HRegionServer(959): stopping server 20680646cf8a,39097,1731566886006 2024-11-14T06:48:53,788 INFO [RS:0;20680646cf8a:39097 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T06:48:53,788 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 6cf8faecc28e15c306ac8f738165ab4b, disabling compactions & flushes 2024-11-14T06:48:53,788 INFO [RS:0;20680646cf8a:39097 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;20680646cf8a:39097. 2024-11-14T06:48:53,788 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731566909273.6cf8faecc28e15c306ac8f738165ab4b. 
2024-11-14T06:48:53,788 DEBUG [RS:0;20680646cf8a:39097 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T06:48:53,788 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731566909273.6cf8faecc28e15c306ac8f738165ab4b. 2024-11-14T06:48:53,788 DEBUG [RS:0;20680646cf8a:39097 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:48:53,788 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731566909273.6cf8faecc28e15c306ac8f738165ab4b. after waiting 0 ms 2024-11-14T06:48:53,788 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731566909273.6cf8faecc28e15c306ac8f738165ab4b. 2024-11-14T06:48:53,788 INFO [RS:0;20680646cf8a:39097 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T06:48:53,789 INFO [RS:0;20680646cf8a:39097 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T06:48:53,789 INFO [RS:0;20680646cf8a:39097 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-14T06:48:53,789 INFO [RS:0;20680646cf8a:39097 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T06:48:53,789 INFO [RS:0;20680646cf8a:39097 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-14T06:48:53,789 DEBUG [RS:0;20680646cf8a:39097 {}] regionserver.HRegionServer(1325): Online Regions={6cf8faecc28e15c306ac8f738165ab4b=TestLogRolling-testLogRolling,,1731566909273.6cf8faecc28e15c306ac8f738165ab4b., 1588230740=hbase:meta,,1.1588230740, f1a89a5774b34afce0f3d8eb00754b02=TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.} 2024-11-14T06:48:53,789 DEBUG [RS:0;20680646cf8a:39097 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 6cf8faecc28e15c306ac8f738165ab4b, f1a89a5774b34afce0f3d8eb00754b02 2024-11-14T06:48:53,789 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T06:48:53,789 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T06:48:53,789 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T06:48:53,789 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T06:48:53,789 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T06:48:53,789 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731566909273.6cf8faecc28e15c306ac8f738165ab4b.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/6cf8faecc28e15c306ac8f738165ab4b/info/34e9b51ef2194bb78ee2ae209ab3a4ad.aa84aa8e79ae0ab4ee31e90c0e08f200->hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/34e9b51ef2194bb78ee2ae209ab3a4ad-bottom] to archive 2024-11-14T06:48:53,790 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731566909273.6cf8faecc28e15c306ac8f738165ab4b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-14T06:48:53,791 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731566909273.6cf8faecc28e15c306ac8f738165ab4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/6cf8faecc28e15c306ac8f738165ab4b/info/34e9b51ef2194bb78ee2ae209ab3a4ad.aa84aa8e79ae0ab4ee31e90c0e08f200 to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/6cf8faecc28e15c306ac8f738165ab4b/info/34e9b51ef2194bb78ee2ae209ab3a4ad.aa84aa8e79ae0ab4ee31e90c0e08f200 2024-11-14T06:48:53,792 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731566909273.6cf8faecc28e15c306ac8f738165ab4b.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=20680646cf8a:32979 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-14T06:48:53,792 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731566909273.6cf8faecc28e15c306ac8f738165ab4b.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-14T06:48:53,793 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-14T06:48:53,793 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T06:48:53,793 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T06:48:53,793 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731566933789Running coprocessor pre-close hooks at 1731566933789Disabling compacts and flushes for region at 1731566933789Disabling writes for close at 1731566933789Writing region close event to WAL at 1731566933790 (+1 ms)Running coprocessor post-close hooks at 1731566933793 (+3 ms)Closed at 1731566933793 2024-11-14T06:48:53,793 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T06:48:53,795 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/6cf8faecc28e15c306ac8f738165ab4b/recovered.edits/93.seqid, newMaxSeqId=93, maxSeqId=88 2024-11-14T06:48:53,796 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731566909273.6cf8faecc28e15c306ac8f738165ab4b. 2024-11-14T06:48:53,796 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 6cf8faecc28e15c306ac8f738165ab4b: Waiting for close lock at 1731566933788Running coprocessor pre-close hooks at 1731566933788Disabling compacts and flushes for region at 1731566933788Disabling writes for close at 1731566933788Writing region close event to WAL at 1731566933792 (+4 ms)Running coprocessor post-close hooks at 1731566933796 (+4 ms)Closed at 1731566933796 2024-11-14T06:48:53,796 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731566909273.6cf8faecc28e15c306ac8f738165ab4b. 
2024-11-14T06:48:53,796 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing f1a89a5774b34afce0f3d8eb00754b02, disabling compactions & flushes 2024-11-14T06:48:53,796 INFO [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02. 2024-11-14T06:48:53,796 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02. 2024-11-14T06:48:53,796 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02. after waiting 0 ms 2024-11-14T06:48:53,796 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02. 2024-11-14T06:48:53,796 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/34e9b51ef2194bb78ee2ae209ab3a4ad.aa84aa8e79ae0ab4ee31e90c0e08f200->hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/aa84aa8e79ae0ab4ee31e90c0e08f200/info/34e9b51ef2194bb78ee2ae209ab3a4ad-top, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/a45065c280694007a1ab6006b02cf117, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/TestLogRolling-testLogRolling=aa84aa8e79ae0ab4ee31e90c0e08f200-619eed19b1b54a86a469b7fc44cededb, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/6c35ad91950c4890b209fa4ac0fb1151, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/a07b4498e4d64c97b0bd1e49e2cb9a95, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/d9cfc9dbbeab424b8aff63409db7eaa1, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/f08d14fd307a4c498f1ba8b00d7207fa, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/9b5614b4c990427a80376028ac39d41f, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/826518e1ab1e41aa8fd7b8408cab26d3, 
hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/b104b4c2904b4d5e8ffe288251bc7b61, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/cfe66d62a1564551b15003dde326833b, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/7230f9ff4ad941568dfc2519daf9e918, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/7dd3aa085ae4432495572695e7acf398, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/4f28c44821d14776bd6a75f1bc75c3c1, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/e5be554c7a2f4a22802cfd6c17340b06, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/ee13fd64ddb24a26a8432ad6096ccd1f, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/146e5e291518436b8644624ae5fbb4c0, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/39fc539a27194e8ba070f62a65c2ddda, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/65f899fac698449d8802844382165caf, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/5a0572fbb53b4a88ab49306e27b53996, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/d09fe7e3ceaa42e8a08133cfc6e4048a, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/891ef293293240589570c2eafe8338f6, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/ddec1bba54014e7f8b576a65055618f4, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/bf6da3278e3b40c1a2a2c9bb7b34b6ed, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/9e2c626bbd594592a318c44543f92e81, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/033deb4afd5a45018f68c2c43e1059bc] to archive 2024-11-14T06:48:53,798 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] backup.HFileArchiver(360): Archiving 
compacted files. 2024-11-14T06:48:53,799 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/34e9b51ef2194bb78ee2ae209ab3a4ad.aa84aa8e79ae0ab4ee31e90c0e08f200 to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/34e9b51ef2194bb78ee2ae209ab3a4ad.aa84aa8e79ae0ab4ee31e90c0e08f200 2024-11-14T06:48:53,800 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/a45065c280694007a1ab6006b02cf117 to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/a45065c280694007a1ab6006b02cf117 2024-11-14T06:48:53,801 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/TestLogRolling-testLogRolling=aa84aa8e79ae0ab4ee31e90c0e08f200-619eed19b1b54a86a469b7fc44cededb to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/TestLogRolling-testLogRolling=aa84aa8e79ae0ab4ee31e90c0e08f200-619eed19b1b54a86a469b7fc44cededb 2024-11-14T06:48:53,802 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/6c35ad91950c4890b209fa4ac0fb1151 to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/6c35ad91950c4890b209fa4ac0fb1151 2024-11-14T06:48:53,803 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/a07b4498e4d64c97b0bd1e49e2cb9a95 to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/a07b4498e4d64c97b0bd1e49e2cb9a95 2024-11-14T06:48:53,804 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/d9cfc9dbbeab424b8aff63409db7eaa1 to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/d9cfc9dbbeab424b8aff63409db7eaa1 2024-11-14T06:48:53,805 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/f08d14fd307a4c498f1ba8b00d7207fa to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/f08d14fd307a4c498f1ba8b00d7207fa 2024-11-14T06:48:53,806 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/9b5614b4c990427a80376028ac39d41f to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/9b5614b4c990427a80376028ac39d41f 2024-11-14T06:48:53,808 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/826518e1ab1e41aa8fd7b8408cab26d3 to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/826518e1ab1e41aa8fd7b8408cab26d3 2024-11-14T06:48:53,809 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/b104b4c2904b4d5e8ffe288251bc7b61 to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/b104b4c2904b4d5e8ffe288251bc7b61 2024-11-14T06:48:53,811 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/cfe66d62a1564551b15003dde326833b to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/cfe66d62a1564551b15003dde326833b 2024-11-14T06:48:53,812 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/7230f9ff4ad941568dfc2519daf9e918 to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/7230f9ff4ad941568dfc2519daf9e918 2024-11-14T06:48:53,813 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/7dd3aa085ae4432495572695e7acf398 to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/7dd3aa085ae4432495572695e7acf398 2024-11-14T06:48:53,814 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/4f28c44821d14776bd6a75f1bc75c3c1 to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/4f28c44821d14776bd6a75f1bc75c3c1 2024-11-14T06:48:53,814 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/e5be554c7a2f4a22802cfd6c17340b06 to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/e5be554c7a2f4a22802cfd6c17340b06 2024-11-14T06:48:53,815 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/ee13fd64ddb24a26a8432ad6096ccd1f to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/ee13fd64ddb24a26a8432ad6096ccd1f 2024-11-14T06:48:53,816 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/146e5e291518436b8644624ae5fbb4c0 to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/146e5e291518436b8644624ae5fbb4c0 2024-11-14T06:48:53,817 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/39fc539a27194e8ba070f62a65c2ddda to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/39fc539a27194e8ba070f62a65c2ddda 2024-11-14T06:48:53,818 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/65f899fac698449d8802844382165caf to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/65f899fac698449d8802844382165caf 2024-11-14T06:48:53,819 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/5a0572fbb53b4a88ab49306e27b53996 to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/5a0572fbb53b4a88ab49306e27b53996 2024-11-14T06:48:53,820 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/d09fe7e3ceaa42e8a08133cfc6e4048a to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/d09fe7e3ceaa42e8a08133cfc6e4048a 2024-11-14T06:48:53,821 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/891ef293293240589570c2eafe8338f6 to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/891ef293293240589570c2eafe8338f6 2024-11-14T06:48:53,822 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/ddec1bba54014e7f8b576a65055618f4 to 
hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/ddec1bba54014e7f8b576a65055618f4 2024-11-14T06:48:53,823 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/bf6da3278e3b40c1a2a2c9bb7b34b6ed to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/bf6da3278e3b40c1a2a2c9bb7b34b6ed 2024-11-14T06:48:53,824 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/9e2c626bbd594592a318c44543f92e81 to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/9e2c626bbd594592a318c44543f92e81 2024-11-14T06:48:53,824 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/033deb4afd5a45018f68c2c43e1059bc to hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/archive/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/info/033deb4afd5a45018f68c2c43e1059bc 2024-11-14T06:48:53,825 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [a45065c280694007a1ab6006b02cf117=8359, 6c35ad91950c4890b209fa4ac0fb1151=12509, a07b4498e4d64c97b0bd1e49e2cb9a95=28952, d9cfc9dbbeab424b8aff63409db7eaa1=17894, f08d14fd307a4c498f1ba8b00d7207fa=18988, 9b5614b4c990427a80376028ac39d41f=50638, 826518e1ab1e41aa8fd7b8408cab26d3=12516, b104b4c2904b4d5e8ffe288251bc7b61=17906, cfe66d62a1564551b15003dde326833b=78909, 7230f9ff4ad941568dfc2519daf9e918=20078, 7dd3aa085ae4432495572695e7acf398=16828, 4f28c44821d14776bd6a75f1bc75c3c1=108125, e5be554c7a2f4a22802cfd6c17340b06=22238, ee13fd64ddb24a26a8432ad6096ccd1f=12516, 146e5e291518436b8644624ae5fbb4c0=134231, 39fc539a27194e8ba070f62a65c2ddda=23316, 65f899fac698449d8802844382165caf=19000, 5a0572fbb53b4a88ab49306e27b53996=160293, d09fe7e3ceaa42e8a08133cfc6e4048a=16839, 891ef293293240589570c2eafe8338f6=19013, ddec1bba54014e7f8b576a65055618f4=183058, bf6da3278e3b40c1a2a2c9bb7b34b6ed=13602, 9e2c626bbd594592a318c44543f92e81=17918, 033deb4afd5a45018f68c2c43e1059bc=17918] 2024-11-14T06:48:53,828 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/data/default/TestLogRolling-testLogRolling/f1a89a5774b34afce0f3d8eb00754b02/recovered.edits/343.seqid, newMaxSeqId=343, maxSeqId=88 2024-11-14T06:48:53,828 INFO 
[RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02. 2024-11-14T06:48:53,828 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for f1a89a5774b34afce0f3d8eb00754b02: Waiting for close lock at 1731566933796Running coprocessor pre-close hooks at 1731566933796Disabling compacts and flushes for region at 1731566933796Disabling writes for close at 1731566933796Writing region close event to WAL at 1731566933825 (+29 ms)Running coprocessor post-close hooks at 1731566933828 (+3 ms)Closed at 1731566933828 2024-11-14T06:48:53,828 DEBUG [RS_CLOSE_REGION-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731566909273.f1a89a5774b34afce0f3d8eb00754b02. 2024-11-14T06:48:53,989 INFO [RS:0;20680646cf8a:39097 {}] regionserver.HRegionServer(976): stopping server 20680646cf8a,39097,1731566886006; all regions closed. 2024-11-14T06:48:53,990 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:53,990 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:53,991 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:53,991 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:53,991 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:53,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741834_1010 (size=8107) 2024-11-14T06:48:53,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741834_1010 (size=8107) 2024-11-14T06:48:53,999 DEBUG [RS:0;20680646cf8a:39097 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/oldWALs 2024-11-14T06:48:53,999 INFO [RS:0;20680646cf8a:39097 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 20680646cf8a%2C39097%2C1731566886006.meta:.meta(num 1731566886769) 2024-11-14T06:48:54,000 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:54,000 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:54,000 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:54,000 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:54,000 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:54,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741881_1057 (size=778) 2024-11-14T06:48:54,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741881_1057 (size=778) 2024-11-14T06:48:54,005 DEBUG [RS:0;20680646cf8a:39097 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/oldWALs 2024-11-14T06:48:54,005 INFO [RS:0;20680646cf8a:39097 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 20680646cf8a%2C39097%2C1731566886006:(num 1731566933771) 2024-11-14T06:48:54,005 DEBUG [RS:0;20680646cf8a:39097 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:48:54,005 INFO [RS:0;20680646cf8a:39097 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T06:48:54,005 
INFO [RS:0;20680646cf8a:39097 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T06:48:54,005 INFO [RS:0;20680646cf8a:39097 {}] hbase.ChoreService(370): Chore service for: regionserver/20680646cf8a:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-14T06:48:54,005 INFO [RS:0;20680646cf8a:39097 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T06:48:54,006 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T06:48:54,006 INFO [RS:0;20680646cf8a:39097 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39097 2024-11-14T06:48:54,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x1003cfd38a10001, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/20680646cf8a,39097,1731566886006 2024-11-14T06:48:54,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T06:48:54,007 INFO [RS:0;20680646cf8a:39097 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T06:48:54,008 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [20680646cf8a,39097,1731566886006] 2024-11-14T06:48:54,009 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/20680646cf8a,39097,1731566886006 already deleted, retry=false 2024-11-14T06:48:54,009 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 20680646cf8a,39097,1731566886006 expired; onlineServers=0 2024-11-14T06:48:54,009 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '20680646cf8a,32979,1731566885953' ***** 2024-11-14T06:48:54,009 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T06:48:54,009 INFO [M:0;20680646cf8a:32979 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T06:48:54,009 INFO [M:0;20680646cf8a:32979 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T06:48:54,009 DEBUG [M:0;20680646cf8a:32979 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T06:48:54,009 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
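Aside: the "Received ZooKeeper Event, type=NodeDeleted, ... path=/hbase/rs/20680646cf8a,39097,..." and "RegionServer ephemeral node deleted, processing expiration" records above rely on ZooKeeper ephemeral znodes, which the quorum removes automatically when the owning session closes. A minimal stand-alone sketch of that mechanism using the plain ZooKeeper client API; the connect string, znode path and timeout are made up for illustration and are not the ones from this run:

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralNodeSketch {
      // Registers an ephemeral znode, then shows it disappearing once the owning session closes.
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Hypothetical local quorum; the run above used 127.0.0.1:56956.
        ZooKeeper member = new ZooKeeper("127.0.0.1:2181", 30000, event -> connected.countDown());
        connected.await();
        // Roughly what a region server does under /hbase/rs/<server-name>.
        String path = member.create("/demo-rs-member", new byte[0],
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

        CountDownLatch observing = new CountDownLatch(1);
        ZooKeeper observer = new ZooKeeper("127.0.0.1:2181", 30000, event -> observing.countDown());
        observing.await();
        System.out.println("before close: exists=" + (observer.exists(path, false) != null));

        member.close();              // session ends; ZooKeeper deletes the ephemeral node
        Thread.sleep(500);           // small grace period before re-checking from the other session
        System.out.println("after close:  exists=" + (observer.exists(path, false) != null));
        observer.close();
      }
    }

The master-side watcher in the log (RegionServerTracker) reacts to exactly this kind of NodeDeleted event to decide that the region server has gone away.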
2024-11-14T06:48:54,009 DEBUG [master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.large.0-1731566886167 {}] cleaner.HFileCleaner(306): Exit Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.large.0-1731566886167,5,FailOnTimeoutGroup] 2024-11-14T06:48:54,009 DEBUG [M:0;20680646cf8a:32979 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T06:48:54,009 DEBUG [master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.small.0-1731566886168 {}] cleaner.HFileCleaner(306): Exit Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.small.0-1731566886168,5,FailOnTimeoutGroup] 2024-11-14T06:48:54,010 INFO [M:0;20680646cf8a:32979 {}] hbase.ChoreService(370): Chore service for: master/20680646cf8a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T06:48:54,010 INFO [M:0;20680646cf8a:32979 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T06:48:54,010 DEBUG [M:0;20680646cf8a:32979 {}] master.HMaster(1795): Stopping service threads 2024-11-14T06:48:54,010 INFO [M:0;20680646cf8a:32979 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T06:48:54,010 INFO [M:0;20680646cf8a:32979 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T06:48:54,010 INFO [M:0;20680646cf8a:32979 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T06:48:54,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T06:48:54,010 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-14T06:48:54,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:54,011 DEBUG [M:0;20680646cf8a:32979 {}] zookeeper.ZKUtil(347): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T06:48:54,011 WARN [M:0;20680646cf8a:32979 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T06:48:54,011 INFO [M:0;20680646cf8a:32979 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/.lastflushedseqids 2024-11-14T06:48:54,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741882_1058 (size=228) 2024-11-14T06:48:54,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741882_1058 (size=228) 2024-11-14T06:48:54,017 INFO [M:0;20680646cf8a:32979 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T06:48:54,017 INFO [M:0;20680646cf8a:32979 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T06:48:54,018 DEBUG [M:0;20680646cf8a:32979 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T06:48:54,018 INFO [M:0;20680646cf8a:32979 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:48:54,018 DEBUG [M:0;20680646cf8a:32979 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:48:54,018 DEBUG [M:0;20680646cf8a:32979 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T06:48:54,018 DEBUG [M:0;20680646cf8a:32979 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T06:48:54,018 INFO [M:0;20680646cf8a:32979 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.35 KB 2024-11-14T06:48:54,031 DEBUG [M:0;20680646cf8a:32979 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a2b03319291c49f6bff1b9df22ae278e is 82, key is hbase:meta,,1/info:regioninfo/1731566886801/Put/seqid=0 2024-11-14T06:48:54,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741883_1059 (size=5672) 2024-11-14T06:48:54,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741883_1059 (size=5672) 2024-11-14T06:48:54,036 INFO [M:0;20680646cf8a:32979 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a2b03319291c49f6bff1b9df22ae278e 2024-11-14T06:48:54,058 DEBUG [M:0;20680646cf8a:32979 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/034438de02e341949535747843787993 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731566887324/Put/seqid=0 2024-11-14T06:48:54,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741884_1060 (size=7090) 2024-11-14T06:48:54,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741884_1060 (size=7090) 2024-11-14T06:48:54,063 INFO [M:0;20680646cf8a:32979 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/034438de02e341949535747843787993 2024-11-14T06:48:54,067 INFO [M:0;20680646cf8a:32979 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 034438de02e341949535747843787993 2024-11-14T06:48:54,080 DEBUG [M:0;20680646cf8a:32979 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d2a9d1a813b34172999077271d59afe5 is 69, key is 20680646cf8a,39097,1731566886006/rs:state/1731566886244/Put/seqid=0 2024-11-14T06:48:54,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741885_1061 (size=5156) 2024-11-14T06:48:54,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741885_1061 (size=5156) 2024-11-14T06:48:54,084 INFO [M:0;20680646cf8a:32979 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), 
to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d2a9d1a813b34172999077271d59afe5 2024-11-14T06:48:54,100 DEBUG [M:0;20680646cf8a:32979 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4eef499dc29d4881abadcfd4ce5b1235 is 52, key is load_balancer_on/state:d/1731566886937/Put/seqid=0 2024-11-14T06:48:54,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741886_1062 (size=5056) 2024-11-14T06:48:54,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741886_1062 (size=5056) 2024-11-14T06:48:54,105 INFO [M:0;20680646cf8a:32979 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4eef499dc29d4881abadcfd4ce5b1235 2024-11-14T06:48:54,108 INFO [RS:0;20680646cf8a:39097 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T06:48:54,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x1003cfd38a10001, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:48:54,108 INFO [RS:0;20680646cf8a:39097 {}] regionserver.HRegionServer(1031): Exiting; stopping=20680646cf8a,39097,1731566886006; zookeeper connection closed. 
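Aside: the flush records above write each store's memstore to a temporary HFile under the region's .tmp/ directory, and the "Committing ... .tmp/... as ..." records that follow move that file into the column-family directory, so readers only ever see complete files. A minimal sketch of that write-then-rename commit pattern using the plain Hadoop FileSystem API; the filesystem URI and paths are illustrative, not the ones from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpThenCommitSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical namenode; the test run above used hdfs://localhost:39515.
        conf.set("fs.defaultFS", "hdfs://localhost:8020");
        FileSystem fs = FileSystem.get(conf);

        Path tmp = new Path("/demo/region/.tmp/info/flush-0001");   // staging location
        Path committed = new Path("/demo/region/info/flush-0001");  // final, reader-visible location

        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.writeBytes("flushed cells would go here");            // stand-in for real HFile bytes
        }
        fs.mkdirs(committed.getParent());
        // The rename is the commit step: either the whole file appears in info/, or nothing does.
        if (!fs.rename(tmp, committed)) {
          throw new java.io.IOException("commit failed for " + committed);
        }
        System.out.println("committed " + committed + " size=" + fs.getFileStatus(committed).getLen());
        fs.close();
      }
    }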
2024-11-14T06:48:54,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x1003cfd38a10001, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:48:54,108 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1150836 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1150836 2024-11-14T06:48:54,109 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T06:48:54,110 DEBUG [M:0;20680646cf8a:32979 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a2b03319291c49f6bff1b9df22ae278e as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a2b03319291c49f6bff1b9df22ae278e 2024-11-14T06:48:54,115 INFO [M:0;20680646cf8a:32979 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a2b03319291c49f6bff1b9df22ae278e, entries=8, sequenceid=125, filesize=5.5 K 2024-11-14T06:48:54,116 DEBUG [M:0;20680646cf8a:32979 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/034438de02e341949535747843787993 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/034438de02e341949535747843787993 2024-11-14T06:48:54,122 INFO [M:0;20680646cf8a:32979 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 034438de02e341949535747843787993 2024-11-14T06:48:54,122 INFO [M:0;20680646cf8a:32979 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/034438de02e341949535747843787993, entries=13, sequenceid=125, filesize=6.9 K 2024-11-14T06:48:54,123 DEBUG [M:0;20680646cf8a:32979 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d2a9d1a813b34172999077271d59afe5 as hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d2a9d1a813b34172999077271d59afe5 2024-11-14T06:48:54,127 INFO [M:0;20680646cf8a:32979 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d2a9d1a813b34172999077271d59afe5, entries=1, sequenceid=125, filesize=5.0 K 2024-11-14T06:48:54,128 DEBUG [M:0;20680646cf8a:32979 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4eef499dc29d4881abadcfd4ce5b1235 as 
hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4eef499dc29d4881abadcfd4ce5b1235 2024-11-14T06:48:54,132 INFO [M:0;20680646cf8a:32979 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39515/user/jenkins/test-data/d748dd73-0e8d-bca2-1d2c-8bd9e981e1c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4eef499dc29d4881abadcfd4ce5b1235, entries=1, sequenceid=125, filesize=4.9 K 2024-11-14T06:48:54,133 INFO [M:0;20680646cf8a:32979 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 115ms, sequenceid=125, compaction requested=false 2024-11-14T06:48:54,137 INFO [M:0;20680646cf8a:32979 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:48:54,137 DEBUG [M:0;20680646cf8a:32979 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731566934018Disabling compacts and flushes for region at 1731566934018Disabling writes for close at 1731566934018Obtaining lock to block concurrent updates at 1731566934018Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731566934018Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64808, getOffHeapSize=0, getCellsCount=148 at 1731566934018Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731566934019 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731566934019Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731566934031 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731566934031Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731566934040 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731566934058 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731566934058Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731566934067 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731566934079 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731566934079Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731566934088 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731566934100 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731566934100Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@28a1c37d: reopening flushed file at 1731566934109 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@38c18ba8: reopening flushed file at 1731566934115 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@25ffaf45: reopening flushed file at 1731566934122 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1bb2bcb9: reopening flushed file at 1731566934127 (+5 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 115ms, sequenceid=125, compaction requested=false at 1731566934133 (+6 ms)Writing region close event to WAL at 1731566934137 (+4 ms)Closed at 1731566934137 2024-11-14T06:48:54,138 INFO [sync.0 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:54,138 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:54,138 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:54,138 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:54,138 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:54,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41929 is added to blk_1073741830_1006 (size=61320) 2024-11-14T06:48:54,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741830_1006 (size=61320) 2024-11-14T06:48:54,140 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T06:48:54,140 INFO [M:0;20680646cf8a:32979 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-14T06:48:54,140 INFO [M:0;20680646cf8a:32979 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32979 2024-11-14T06:48:54,141 INFO [M:0;20680646cf8a:32979 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T06:48:54,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:48:54,242 INFO [M:0;20680646cf8a:32979 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T06:48:54,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32979-0x1003cfd38a10000, quorum=127.0.0.1:56956, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:48:54,244 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@550f8afc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:48:54,244 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1b28e221{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:48:54,244 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:48:54,244 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2de80e16{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:48:54,244 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2735da07{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/hadoop.log.dir/,STOPPED} 2024-11-14T06:48:54,246 WARN [BP-2010868496-172.17.0.2-1731566885368 heartbeating to localhost/127.0.0.1:39515 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T06:48:54,246 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T06:48:54,246 WARN [BP-2010868496-172.17.0.2-1731566885368 heartbeating to localhost/127.0.0.1:39515 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2010868496-172.17.0.2-1731566885368 (Datanode Uuid a9cb4c57-78c0-44d3-9f60-ddd309de3458) service to localhost/127.0.0.1:39515 2024-11-14T06:48:54,246 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T06:48:54,247 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/cluster_66668d32-f255-2691-2a87-36d2a9a81533/data/data3/current/BP-2010868496-172.17.0.2-1731566885368 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:48:54,247 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/cluster_66668d32-f255-2691-2a87-36d2a9a81533/data/data4/current/BP-2010868496-172.17.0.2-1731566885368 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:48:54,247 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T06:48:54,249 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@45bda0cb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:48:54,250 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@79864455{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:48:54,250 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:48:54,250 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a5ea7cd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:48:54,250 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f8f17a1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/hadoop.log.dir/,STOPPED} 2024-11-14T06:48:54,252 WARN [BP-2010868496-172.17.0.2-1731566885368 heartbeating to localhost/127.0.0.1:39515 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T06:48:54,252 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
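Aside: these datanode, block-pool and jetty shutdown records are the test harness tearing down the mini HDFS/HBase cluster after testLogRolling, before a fresh one is started for testLogRollOnNothingWritten further down. A rough sketch of that lifecycle from a test's point of view, assuming HBaseTestingUtil keeps its long-standing startMiniCluster / shutdownMiniCluster names and the createTable(TableName, family) convenience helper; the table and row details are illustrative only:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MiniClusterLifecycleSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster();                      // ZK quorum, mini DFS, master, region server
        try {
          Table table = util.createTable(TableName.valueOf("demo"), Bytes.toBytes("info"));
          table.put(new Put(Bytes.toBytes("row0001"))
              .addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v")));
          table.close();
        } finally {
          util.shutdownMiniCluster();                 // produces teardown records like those above
        }
      }
    }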
2024-11-14T06:48:54,252 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T06:48:54,252 WARN [BP-2010868496-172.17.0.2-1731566885368 heartbeating to localhost/127.0.0.1:39515 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2010868496-172.17.0.2-1731566885368 (Datanode Uuid b6813ce8-6b2a-45b1-b7c6-5468599a678c) service to localhost/127.0.0.1:39515 2024-11-14T06:48:54,252 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/cluster_66668d32-f255-2691-2a87-36d2a9a81533/data/data1/current/BP-2010868496-172.17.0.2-1731566885368 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:48:54,252 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/cluster_66668d32-f255-2691-2a87-36d2a9a81533/data/data2/current/BP-2010868496-172.17.0.2-1731566885368 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:48:54,253 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T06:48:54,258 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4bdca924{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T06:48:54,258 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@27fb1a0a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:48:54,258 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:48:54,259 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b767eb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:48:54,259 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1fda4535{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/hadoop.log.dir/,STOPPED} 2024-11-14T06:48:54,264 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T06:48:54,266 INFO [regionserver/20680646cf8a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T06:48:54,292 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T06:48:54,304 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=229 (was 206) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39515 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39515 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39515 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39515 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39515 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39515 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39515 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:39515 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=515 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=169 (was 52) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=719 (was 1089) 2024-11-14T06:48:54,311 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=229, OpenFileDescriptor=515, MaxFileDescriptor=1048576, SystemLoadAverage=169, ProcessCount=11, AvailableMemoryMB=719 2024-11-14T06:48:54,311 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T06:48:54,311 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/hadoop.log.dir so I do NOT create it in target/test-data/693bae84-f601-168e-b4ff-c662493b7d49 2024-11-14T06:48:54,311 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2e243ac2-bb22-2a9a-7458-441c2025024a/hadoop.tmp.dir so I do NOT create it in target/test-data/693bae84-f601-168e-b4ff-c662493b7d49 2024-11-14T06:48:54,311 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/cluster_9dc0f410-2e85-675d-e08d-7b710d1cab13, deleteOnExit=true 2024-11-14T06:48:54,311 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T06:48:54,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/test.cache.data in system properties and HBase conf 2024-11-14T06:48:54,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T06:48:54,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/hadoop.log.dir in system properties and HBase conf 2024-11-14T06:48:54,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T06:48:54,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T06:48:54,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T06:48:54,312 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-14T06:48:54,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T06:48:54,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T06:48:54,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T06:48:54,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T06:48:54,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T06:48:54,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T06:48:54,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T06:48:54,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T06:48:54,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T06:48:54,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/nfs.dump.dir in system properties and HBase conf 2024-11-14T06:48:54,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/java.io.tmpdir in system properties and HBase conf 2024-11-14T06:48:54,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T06:48:54,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T06:48:54,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T06:48:54,327 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T06:48:54,366 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:48:54,369 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:48:54,373 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:48:54,373 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:48:54,373 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T06:48:54,374 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:48:54,374 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1e07b1d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:48:54,374 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a024a47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:48:54,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:54,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:54,466 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5be97557{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/java.io.tmpdir/jetty-localhost-46259-hadoop-hdfs-3_4_1-tests_jar-_-any-16831303847839608603/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T06:48:54,466 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6ce66601{HTTP/1.1, (http/1.1)}{localhost:46259} 2024-11-14T06:48:54,466 INFO [Time-limited test {}] server.Server(415): Started @286063ms 2024-11-14T06:48:54,477 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T06:48:54,513 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:48:54,515 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:48:54,516 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:48:54,516 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:48:54,516 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T06:48:54,517 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b01e479{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:48:54,517 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20042785{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:48:54,528 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T06:48:54,528 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T06:48:54,529 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-14T06:48:54,529 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-14T06:48:54,615 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@265f6a26{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/java.io.tmpdir/jetty-localhost-44939-hadoop-hdfs-3_4_1-tests_jar-_-any-10580139865892588377/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:48:54,616 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7d6e4d22{HTTP/1.1, (http/1.1)}{localhost:44939} 2024-11-14T06:48:54,616 INFO [Time-limited test {}] server.Server(415): Started @286213ms 2024-11-14T06:48:54,617 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T06:48:54,645 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T06:48:54,648 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T06:48:54,649 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T06:48:54,649 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T06:48:54,649 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T06:48:54,651 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@9268180{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/hadoop.log.dir/,AVAILABLE} 2024-11-14T06:48:54,651 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e818af2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T06:48:54,676 WARN [Thread-2494 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/cluster_9dc0f410-2e85-675d-e08d-7b710d1cab13/data/data1/current/BP-2076026094-172.17.0.2-1731566934330/current, will proceed with Du for space computation calculation, 2024-11-14T06:48:54,676 WARN [Thread-2495 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/cluster_9dc0f410-2e85-675d-e08d-7b710d1cab13/data/data2/current/BP-2076026094-172.17.0.2-1731566934330/current, will proceed with Du for space computation calculation, 2024-11-14T06:48:54,696 WARN [Thread-2473 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T06:48:54,699 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x45f9edfa89079c40 with lease ID 0xfb41d1ce7502f82c: Processing first storage report for DS-9cabc60d-10a9-4cec-9c9c-6a7e80994810 from datanode DatanodeRegistration(127.0.0.1:37163, datanodeUuid=7bfc013a-00aa-4e99-a5e9-67b880910def, infoPort=35777, infoSecurePort=0, ipcPort=38443, storageInfo=lv=-57;cid=testClusterID;nsid=851519320;c=1731566934330) 2024-11-14T06:48:54,699 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x45f9edfa89079c40 with lease ID 0xfb41d1ce7502f82c: from storage DS-9cabc60d-10a9-4cec-9c9c-6a7e80994810 node DatanodeRegistration(127.0.0.1:37163, datanodeUuid=7bfc013a-00aa-4e99-a5e9-67b880910def, infoPort=35777, infoSecurePort=0, ipcPort=38443, storageInfo=lv=-57;cid=testClusterID;nsid=851519320;c=1731566934330), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:48:54,699 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x45f9edfa89079c40 with lease ID 0xfb41d1ce7502f82c: Processing first storage report for DS-25ca4f90-786c-4e5d-b7c2-24e6cbea9592 from datanode DatanodeRegistration(127.0.0.1:37163, datanodeUuid=7bfc013a-00aa-4e99-a5e9-67b880910def, infoPort=35777, infoSecurePort=0, ipcPort=38443, storageInfo=lv=-57;cid=testClusterID;nsid=851519320;c=1731566934330) 2024-11-14T06:48:54,699 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x45f9edfa89079c40 with lease ID 0xfb41d1ce7502f82c: from storage DS-25ca4f90-786c-4e5d-b7c2-24e6cbea9592 node DatanodeRegistration(127.0.0.1:37163, datanodeUuid=7bfc013a-00aa-4e99-a5e9-67b880910def, infoPort=35777, infoSecurePort=0, ipcPort=38443, storageInfo=lv=-57;cid=testClusterID;nsid=851519320;c=1731566934330), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:48:54,755 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@45235d1a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/java.io.tmpdir/jetty-localhost-36157-hadoop-hdfs-3_4_1-tests_jar-_-any-2793546116447700494/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:48:54,755 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2527c84a{HTTP/1.1, (http/1.1)}{localhost:36157} 2024-11-14T06:48:54,755 INFO [Time-limited test {}] server.Server(415): Started @286352ms 2024-11-14T06:48:54,756 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
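The WARN/INFO lines above trace the test harness bringing up an in-process HDFS mini cluster: DataNode storages report in to the NameNode's BlockManager, each node fronted by its own embedded Jetty server, with the per-test directories under target/test-data wired into system properties and HBase conf. Below is a minimal sketch of how such a cluster is typically started and stopped from a test, assuming the HBaseTestingUtil class named in the log; the method names and the example configuration tweak follow common HBaseTestingUtility/HBaseTestingUtil usage and are assumptions, not code quoted from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        // The utility owns the per-test directories (hadoop.log.dir, dfs data dirs,
        // zookeeper_0, ...) that the property-setting lines above are wiring up.
        HBaseTestingUtil util = new HBaseTestingUtil();
        Configuration conf = util.getConfiguration();
        conf.setInt("hbase.regionserver.handler.count", 3); // example tweak before startup

        util.startMiniCluster();       // in-process DFS + ZooKeeper + HBase master/regionserver
        try {
          // ... test body, e.g. talking to the cluster via util.getConnection() ...
        } finally {
          util.shutdownMiniCluster();  // tears the whole mini cluster back down
        }
      }
    }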
2024-11-14T06:48:54,809 WARN [Thread-2521 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/cluster_9dc0f410-2e85-675d-e08d-7b710d1cab13/data/data4/current/BP-2076026094-172.17.0.2-1731566934330/current, will proceed with Du for space computation calculation, 2024-11-14T06:48:54,809 WARN [Thread-2520 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/cluster_9dc0f410-2e85-675d-e08d-7b710d1cab13/data/data3/current/BP-2076026094-172.17.0.2-1731566934330/current, will proceed with Du for space computation calculation, 2024-11-14T06:48:54,828 WARN [Thread-2509 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T06:48:54,830 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb6c5b77fcab1d60a with lease ID 0xfb41d1ce7502f82d: Processing first storage report for DS-362fcbb2-b481-4cd9-a734-538870b9eadb from datanode DatanodeRegistration(127.0.0.1:37127, datanodeUuid=493d5b00-28b0-4b14-912a-10a6ab2cd91d, infoPort=33025, infoSecurePort=0, ipcPort=44275, storageInfo=lv=-57;cid=testClusterID;nsid=851519320;c=1731566934330) 2024-11-14T06:48:54,830 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb6c5b77fcab1d60a with lease ID 0xfb41d1ce7502f82d: from storage DS-362fcbb2-b481-4cd9-a734-538870b9eadb node DatanodeRegistration(127.0.0.1:37127, datanodeUuid=493d5b00-28b0-4b14-912a-10a6ab2cd91d, infoPort=33025, infoSecurePort=0, ipcPort=44275, storageInfo=lv=-57;cid=testClusterID;nsid=851519320;c=1731566934330), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:48:54,830 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb6c5b77fcab1d60a with lease ID 0xfb41d1ce7502f82d: Processing first storage report for DS-513b7a79-68de-497b-954f-69074bdd2c40 from datanode DatanodeRegistration(127.0.0.1:37127, datanodeUuid=493d5b00-28b0-4b14-912a-10a6ab2cd91d, infoPort=33025, infoSecurePort=0, ipcPort=44275, storageInfo=lv=-57;cid=testClusterID;nsid=851519320;c=1731566934330) 2024-11-14T06:48:54,830 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb6c5b77fcab1d60a with lease ID 0xfb41d1ce7502f82d: from storage DS-513b7a79-68de-497b-954f-69074bdd2c40 node DatanodeRegistration(127.0.0.1:37127, datanodeUuid=493d5b00-28b0-4b14-912a-10a6ab2cd91d, infoPort=33025, infoSecurePort=0, ipcPort=44275, storageInfo=lv=-57;cid=testClusterID;nsid=851519320;c=1731566934330), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T06:48:54,878 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49 2024-11-14T06:48:54,880 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/cluster_9dc0f410-2e85-675d-e08d-7b710d1cab13/zookeeper_0, clientPort=50296, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/cluster_9dc0f410-2e85-675d-e08d-7b710d1cab13/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/cluster_9dc0f410-2e85-675d-e08d-7b710d1cab13/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T06:48:54,881 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50296 2024-11-14T06:48:54,881 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:48:54,882 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:48:54,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_1073741825_1001 (size=7) 2024-11-14T06:48:54,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741825_1001 (size=7) 2024-11-14T06:48:54,891 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23 with version=8 2024-11-14T06:48:54,891 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33705/user/jenkins/test-data/0b8f8aa6-a36a-353d-3229-387a075f7ca6/hbase-staging 2024-11-14T06:48:54,892 INFO [Time-limited test {}] client.ConnectionUtils(128): master/20680646cf8a:0 server-side Connection retries=45 2024-11-14T06:48:54,893 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:48:54,893 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T06:48:54,893 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T06:48:54,893 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:48:54,893 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T06:48:54,893 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T06:48:54,893 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T06:48:54,894 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39543 2024-11-14T06:48:54,895 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39543 connecting to ZooKeeper ensemble=127.0.0.1:50296 2024-11-14T06:48:54,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:395430x0, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T06:48:54,900 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39543-0x1003cfdf7ce0000 connected 2024-11-14T06:48:54,913 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:48:54,914 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:48:54,917 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:48:54,917 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23, hbase.cluster.distributed=false 2024-11-14T06:48:54,918 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T06:48:54,921 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39543 2024-11-14T06:48:54,921 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39543 2024-11-14T06:48:54,923 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39543 2024-11-14T06:48:54,925 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39543 2024-11-14T06:48:54,926 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39543 2024-11-14T06:48:54,941 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/20680646cf8a:0 server-side Connection retries=45 2024-11-14T06:48:54,941 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:48:54,941 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T06:48:54,941 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T06:48:54,941 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T06:48:54,941 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T06:48:54,941 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T06:48:54,941 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T06:48:54,942 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40845 2024-11-14T06:48:54,943 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40845 connecting to ZooKeeper ensemble=127.0.0.1:50296 2024-11-14T06:48:54,943 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:48:54,945 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:48:54,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:408450x0, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T06:48:54,948 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:408450x0, quorum=127.0.0.1:50296, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:48:54,948 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40845-0x1003cfdf7ce0001 connected 2024-11-14T06:48:54,949 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T06:48:54,949 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T06:48:54,949 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40845-0x1003cfdf7ce0001, quorum=127.0.0.1:50296, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T06:48:54,950 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40845-0x1003cfdf7ce0001, quorum=127.0.0.1:50296, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T06:48:54,953 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40845 2024-11-14T06:48:54,953 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40845 2024-11-14T06:48:54,956 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40845 2024-11-14T06:48:54,958 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40845 2024-11-14T06:48:54,961 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40845 2024-11-14T06:48:54,971 
DEBUG [M:0;20680646cf8a:39543 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;20680646cf8a:39543 2024-11-14T06:48:54,971 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/20680646cf8a,39543,1731566934892 2024-11-14T06:48:54,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40845-0x1003cfdf7ce0001, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T06:48:54,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T06:48:54,973 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/20680646cf8a,39543,1731566934892 2024-11-14T06:48:54,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40845-0x1003cfdf7ce0001, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T06:48:54,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:54,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40845-0x1003cfdf7ce0001, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:54,974 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T06:48:54,974 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/20680646cf8a,39543,1731566934892 from backup master directory 2024-11-14T06:48:54,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/20680646cf8a,39543,1731566934892 2024-11-14T06:48:54,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40845-0x1003cfdf7ce0001, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T06:48:54,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T06:48:54,975 WARN [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
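The ZKWatcher/ZKUtil lines above trace the master registering itself under /hbase/backup-masters, watching /hbase/master, and then deleting its backup registration once it becomes the active master. The sketch below is not HBase's ActiveMasterManager; it is a minimal plain-ZooKeeper illustration of that ephemeral-znode pattern, assuming the ensemble address from the log (clientPort=50296). The server-name string and the ensure() helper are made up for the example.

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class MasterZNodeSketch {

      // Create a persistent parent znode if it is missing (helper invented for this sketch).
      static void ensure(ZooKeeper zk, String path) throws Exception {
        if (zk.exists(path, false) == null) {
          zk.create(path, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        }
      }

      public static void main(String[] args) throws Exception {
        // The log's test ensemble listens on clientPort=50296.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:50296", 30_000,
            event -> System.out.println("zk event: " + event.getType() + " " + event.getPath()));

        ensure(zk, "/hbase");
        ensure(zk, "/hbase/backup-masters");

        // Register under backup-masters with an ephemeral znode, then check /hbase/master.
        String me = "/hbase/backup-masters/example-host,39543,0"; // hypothetical server name
        zk.create(me, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

        if (zk.exists("/hbase/master", true) == null) {
          // No active master yet: claim the role, then remove the backup registration.
          // This mirrors the NodeCreated(/hbase/master) and NodeDeleted(backup-masters/...)
          // watcher events recorded above.
          zk.create("/hbase/master", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
          zk.delete(me, -1);
        }

        zk.close();
      }
    }

Because both znodes are ephemeral, a crashed master's registrations vanish when its ZooKeeper session expires, which is what allows a backup master's watch on /hbase/master to fire and a new election to proceed.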
2024-11-14T06:48:54,975 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=20680646cf8a,39543,1731566934892 2024-11-14T06:48:54,978 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/hbase.id] with ID: 6c700ee6-7ae5-49fd-aebe-746d8b108a9a 2024-11-14T06:48:54,978 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/.tmp/hbase.id 2024-11-14T06:48:54,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_1073741826_1002 (size=42) 2024-11-14T06:48:54,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741826_1002 (size=42) 2024-11-14T06:48:54,986 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/.tmp/hbase.id]:[hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/hbase.id] 2024-11-14T06:48:54,995 INFO [master/20680646cf8a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:48:54,995 INFO [master/20680646cf8a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T06:48:54,996 INFO [master/20680646cf8a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
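The two FSUtils DEBUG lines above describe writing the new cluster ID to .tmp/hbase.id and then moving it to its final location under the HBase root directory. A minimal sketch of that write-then-rename pattern using the ordinary Hadoop FileSystem API follows; the rootDir argument and the plain-text file content are assumptions for illustration (the real FSUtils code controls the exact file format), while the UUID is the one printed in the log.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical root dir; the test uses hdfs://localhost:35559/user/jenkins/test-data/...
        Path rootDir = new Path(args.length > 0 ? args[0] : "file:///tmp/hbase-rootdir-sketch");
        FileSystem fs = rootDir.getFileSystem(conf);

        Path tmp = new Path(rootDir, ".tmp/hbase.id");
        Path idFile = new Path(rootDir, "hbase.id");

        // Write the ID to a temporary location first...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("6c700ee6-7ae5-49fd-aebe-746d8b108a9a".getBytes(StandardCharsets.UTF_8));
        }
        // ...then move it into place, so readers never observe a half-written hbase.id.
        if (!fs.rename(tmp, idFile)) {
          throw new IOException("rename failed: " + tmp + " -> " + idFile);
        }
      }
    }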
2024-11-14T06:48:54,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40845-0x1003cfdf7ce0001, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:54,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:55,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_1073741827_1003 (size=196) 2024-11-14T06:48:55,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741827_1003 (size=196) 2024-11-14T06:48:55,005 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T06:48:55,006 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T06:48:55,006 INFO [master/20680646cf8a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T06:48:55,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741828_1004 (size=1189) 2024-11-14T06:48:55,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_1073741828_1004 (size=1189) 2024-11-14T06:48:55,013 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/data/master/store 2024-11-14T06:48:55,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_1073741829_1005 (size=34) 2024-11-14T06:48:55,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741829_1005 (size=34) 2024-11-14T06:48:55,019 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:48:55,019 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T06:48:55,019 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:48:55,019 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:48:55,019 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T06:48:55,019 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:48:55,019 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
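The MasterRegion lines above print the full descriptor of the local 'master:store' table (column families info, proc, rs and state) before creating the backing HRegion. The sketch below rebuilds an approximation of that descriptor with the public client API, just to make the printed attributes concrete; the master region itself is constructed internally by MasterRegion, and table-level attributes such as hbase.store.file-tracker.impl are omitted here.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {

      // proc / rs / state: single version, ROW bloom filter, 64 KB blocks, no encoding.
      static ColumnFamilyDescriptor smallFamily(String name) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)
            .build();
      }

      public static void main(String[] args) {
        // info: 3 versions, in-memory, ROWCOL bloom, ROW_INDEX_V1 encoding, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .build();

        TableDescriptor store = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(info)
            .setColumnFamily(smallFamily("proc"))
            .setColumnFamily(smallFamily("rs"))
            .setColumnFamily(smallFamily("state"))
            .build();

        System.out.println(store);
      }
    }

As in the log, 'info' keeps three versions in memory with ROW_INDEX_V1 encoding and 8 KB blocks, while proc, rs and state are single-version families with plain ROW bloom filters and 64 KB blocks.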
2024-11-14T06:48:55,019 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731566935019Disabling compacts and flushes for region at 1731566935019Disabling writes for close at 1731566935019Writing region close event to WAL at 1731566935019Closed at 1731566935019 2024-11-14T06:48:55,020 WARN [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/data/master/store/.initializing 2024-11-14T06:48:55,020 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/WALs/20680646cf8a,39543,1731566934892 2024-11-14T06:48:55,022 INFO [master/20680646cf8a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=20680646cf8a%2C39543%2C1731566934892, suffix=, logDir=hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/WALs/20680646cf8a,39543,1731566934892, archiveDir=hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/oldWALs, maxLogs=10 2024-11-14T06:48:55,023 INFO [master/20680646cf8a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C39543%2C1731566934892.1731566935022 2024-11-14T06:48:55,029 INFO [master/20680646cf8a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/WALs/20680646cf8a,39543,1731566934892/20680646cf8a%2C39543%2C1731566934892.1731566935022 2024-11-14T06:48:55,033 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33025:33025),(127.0.0.1/127.0.0.1:35777:35777)] 2024-11-14T06:48:55,036 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T06:48:55,036 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:48:55,036 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:48:55,036 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:48:55,039 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:48:55,041 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T06:48:55,041 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:55,041 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:48:55,041 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:48:55,043 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T06:48:55,043 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:55,043 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:48:55,043 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:48:55,045 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T06:48:55,045 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:55,045 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:48:55,045 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:48:55,046 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T06:48:55,046 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:55,047 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T06:48:55,047 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:48:55,048 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:48:55,048 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:48:55,049 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:48:55,049 DEBUG [master/20680646cf8a:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:48:55,049 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T06:48:55,051 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T06:48:55,054 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T06:48:55,054 INFO [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=829486, jitterRate=0.0547465980052948}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T06:48:55,055 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731566935036Initializing all the Stores at 1731566935037 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566935037Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566935039 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566935039Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566935039Cleaning up temporary data from old regions at 1731566935049 (+10 ms)Region opened successfully at 1731566935055 (+6 ms) 2024-11-14T06:48:55,055 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T06:48:55,058 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@407e0532, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=20680646cf8a/172.17.0.2:0 2024-11-14T06:48:55,059 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T06:48:55,059 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T06:48:55,059 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T06:48:55,059 INFO [master/20680646cf8a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T06:48:55,060 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T06:48:55,060 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T06:48:55,060 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T06:48:55,062 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T06:48:55,063 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T06:48:55,063 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T06:48:55,064 INFO [master/20680646cf8a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T06:48:55,064 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T06:48:55,065 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T06:48:55,065 INFO [master/20680646cf8a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T06:48:55,066 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T06:48:55,067 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T06:48:55,067 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T06:48:55,068 DEBUG 
[master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T06:48:55,069 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T06:48:55,070 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T06:48:55,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40845-0x1003cfdf7ce0001, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T06:48:55,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T06:48:55,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40845-0x1003cfdf7ce0001, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:55,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:55,071 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=20680646cf8a,39543,1731566934892, sessionid=0x1003cfdf7ce0000, setting cluster-up flag (Was=false) 2024-11-14T06:48:55,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40845-0x1003cfdf7ce0001, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:55,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:55,075 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T06:48:55,076 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=20680646cf8a,39543,1731566934892 2024-11-14T06:48:55,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:55,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40845-0x1003cfdf7ce0001, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:55,080 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T06:48:55,080 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=20680646cf8a,39543,1731566934892 2024-11-14T06:48:55,081 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T06:48:55,083 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T06:48:55,083 INFO [master/20680646cf8a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T06:48:55,084 INFO [master/20680646cf8a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T06:48:55,084 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 20680646cf8a,39543,1731566934892 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T06:48:55,085 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:48:55,085 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:48:55,085 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:48:55,085 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/20680646cf8a:0, corePoolSize=5, maxPoolSize=5 2024-11-14T06:48:55,085 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/20680646cf8a:0, corePoolSize=10, maxPoolSize=10 2024-11-14T06:48:55,085 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:48:55,085 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/20680646cf8a:0, corePoolSize=2, maxPoolSize=2 2024-11-14T06:48:55,085 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/20680646cf8a:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T06:48:55,090 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T06:48:55,090 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T06:48:55,091 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:55,091 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T06:48:55,095 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731566965095 2024-11-14T06:48:55,095 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T06:48:55,095 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T06:48:55,095 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T06:48:55,095 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T06:48:55,095 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T06:48:55,095 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T06:48:55,095 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:55,099 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T06:48:55,100 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T06:48:55,100 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T06:48:55,100 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T06:48:55,100 INFO [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T06:48:55,100 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.large.0-1731566935100,5,FailOnTimeoutGroup] 2024-11-14T06:48:55,101 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.small.0-1731566935100,5,FailOnTimeoutGroup] 2024-11-14T06:48:55,101 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:55,101 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T06:48:55,101 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:55,101 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-14T06:48:55,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_1073741831_1007 (size=1321) 2024-11-14T06:48:55,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741831_1007 (size=1321) 2024-11-14T06:48:55,102 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T06:48:55,102 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23 2024-11-14T06:48:55,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_1073741832_1008 (size=32) 2024-11-14T06:48:55,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741832_1008 (size=32) 2024-11-14T06:48:55,110 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:48:55,111 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T06:48:55,112 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T06:48:55,112 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:55,113 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:48:55,113 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T06:48:55,114 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T06:48:55,114 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:55,114 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:48:55,114 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T06:48:55,115 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T06:48:55,116 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:55,116 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:48:55,116 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T06:48:55,117 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T06:48:55,117 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:55,118 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:48:55,118 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T06:48:55,118 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/data/hbase/meta/1588230740 2024-11-14T06:48:55,119 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/data/hbase/meta/1588230740 2024-11-14T06:48:55,119 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T06:48:55,119 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T06:48:55,120 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-14T06:48:55,121 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T06:48:55,122 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T06:48:55,122 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=739903, jitterRate=-0.05916522443294525}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T06:48:55,123 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731566935110Initializing all the Stores at 1731566935111 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566935111Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566935111Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566935111Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566935111Cleaning up temporary data from old regions at 1731566935120 (+9 ms)Region opened successfully at 1731566935123 (+3 ms) 2024-11-14T06:48:55,123 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T06:48:55,123 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T06:48:55,123 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T06:48:55,123 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T06:48:55,123 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T06:48:55,123 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T06:48:55,123 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731566935123Disabling compacts and flushes for region at 1731566935123Disabling writes for close at 1731566935123Writing region close 
event to WAL at 1731566935123Closed at 1731566935123 2024-11-14T06:48:55,124 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T06:48:55,124 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T06:48:55,125 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T06:48:55,125 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T06:48:55,126 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T06:48:55,162 INFO [RS:0;20680646cf8a:40845 {}] regionserver.HRegionServer(746): ClusterId : 6c700ee6-7ae5-49fd-aebe-746d8b108a9a 2024-11-14T06:48:55,162 DEBUG [RS:0;20680646cf8a:40845 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T06:48:55,164 DEBUG [RS:0;20680646cf8a:40845 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T06:48:55,164 DEBUG [RS:0;20680646cf8a:40845 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T06:48:55,165 DEBUG [RS:0;20680646cf8a:40845 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T06:48:55,165 DEBUG [RS:0;20680646cf8a:40845 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6100077f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=20680646cf8a/172.17.0.2:0 2024-11-14T06:48:55,177 DEBUG [RS:0;20680646cf8a:40845 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;20680646cf8a:40845 2024-11-14T06:48:55,177 INFO [RS:0;20680646cf8a:40845 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T06:48:55,177 INFO [RS:0;20680646cf8a:40845 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T06:48:55,177 DEBUG [RS:0;20680646cf8a:40845 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-14T06:48:55,178 INFO [RS:0;20680646cf8a:40845 {}] regionserver.HRegionServer(2659): reportForDuty to master=20680646cf8a,39543,1731566934892 with port=40845, startcode=1731566934940 2024-11-14T06:48:55,178 DEBUG [RS:0;20680646cf8a:40845 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T06:48:55,180 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43799, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T06:48:55,181 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39543 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 20680646cf8a,40845,1731566934940 2024-11-14T06:48:55,181 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39543 {}] master.ServerManager(517): Registering regionserver=20680646cf8a,40845,1731566934940 2024-11-14T06:48:55,182 DEBUG [RS:0;20680646cf8a:40845 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23 2024-11-14T06:48:55,182 DEBUG [RS:0;20680646cf8a:40845 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35559 2024-11-14T06:48:55,182 DEBUG [RS:0;20680646cf8a:40845 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T06:48:55,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T06:48:55,184 DEBUG [RS:0;20680646cf8a:40845 {}] zookeeper.ZKUtil(111): regionserver:40845-0x1003cfdf7ce0001, quorum=127.0.0.1:50296, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/20680646cf8a,40845,1731566934940 2024-11-14T06:48:55,184 WARN [RS:0;20680646cf8a:40845 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T06:48:55,184 INFO [RS:0;20680646cf8a:40845 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T06:48:55,184 DEBUG [RS:0;20680646cf8a:40845 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/WALs/20680646cf8a,40845,1731566934940 2024-11-14T06:48:55,184 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [20680646cf8a,40845,1731566934940] 2024-11-14T06:48:55,187 INFO [RS:0;20680646cf8a:40845 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T06:48:55,188 INFO [RS:0;20680646cf8a:40845 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T06:48:55,189 INFO [RS:0;20680646cf8a:40845 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T06:48:55,189 INFO [RS:0;20680646cf8a:40845 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-14T06:48:55,189 INFO [RS:0;20680646cf8a:40845 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T06:48:55,190 INFO [RS:0;20680646cf8a:40845 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T06:48:55,190 INFO [RS:0;20680646cf8a:40845 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:55,190 DEBUG [RS:0;20680646cf8a:40845 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:48:55,190 DEBUG [RS:0;20680646cf8a:40845 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:48:55,190 DEBUG [RS:0;20680646cf8a:40845 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:48:55,190 DEBUG [RS:0;20680646cf8a:40845 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:48:55,190 DEBUG [RS:0;20680646cf8a:40845 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:48:55,190 DEBUG [RS:0;20680646cf8a:40845 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/20680646cf8a:0, corePoolSize=2, maxPoolSize=2 2024-11-14T06:48:55,190 DEBUG [RS:0;20680646cf8a:40845 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:48:55,190 DEBUG [RS:0;20680646cf8a:40845 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:48:55,190 DEBUG [RS:0;20680646cf8a:40845 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:48:55,190 DEBUG [RS:0;20680646cf8a:40845 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:48:55,190 DEBUG [RS:0;20680646cf8a:40845 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:48:55,190 DEBUG [RS:0;20680646cf8a:40845 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/20680646cf8a:0, corePoolSize=1, maxPoolSize=1 2024-11-14T06:48:55,190 DEBUG [RS:0;20680646cf8a:40845 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/20680646cf8a:0, corePoolSize=3, maxPoolSize=3 2024-11-14T06:48:55,190 DEBUG [RS:0;20680646cf8a:40845 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/20680646cf8a:0, corePoolSize=3, maxPoolSize=3 2024-11-14T06:48:55,192 INFO [RS:0;20680646cf8a:40845 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-14T06:48:55,192 INFO [RS:0;20680646cf8a:40845 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:55,192 INFO [RS:0;20680646cf8a:40845 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:55,192 INFO [RS:0;20680646cf8a:40845 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:55,193 INFO [RS:0;20680646cf8a:40845 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:55,193 INFO [RS:0;20680646cf8a:40845 {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,40845,1731566934940-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T06:48:55,206 INFO [RS:0;20680646cf8a:40845 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T06:48:55,206 INFO [RS:0;20680646cf8a:40845 {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,40845,1731566934940-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:55,206 INFO [RS:0;20680646cf8a:40845 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:55,206 INFO [RS:0;20680646cf8a:40845 {}] regionserver.Replication(171): 20680646cf8a,40845,1731566934940 started 2024-11-14T06:48:55,219 INFO [RS:0;20680646cf8a:40845 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:55,219 INFO [RS:0;20680646cf8a:40845 {}] regionserver.HRegionServer(1482): Serving as 20680646cf8a,40845,1731566934940, RpcServer on 20680646cf8a/172.17.0.2:40845, sessionid=0x1003cfdf7ce0001 2024-11-14T06:48:55,219 DEBUG [RS:0;20680646cf8a:40845 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T06:48:55,219 DEBUG [RS:0;20680646cf8a:40845 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 20680646cf8a,40845,1731566934940 2024-11-14T06:48:55,219 DEBUG [RS:0;20680646cf8a:40845 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '20680646cf8a,40845,1731566934940' 2024-11-14T06:48:55,219 DEBUG [RS:0;20680646cf8a:40845 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T06:48:55,220 DEBUG [RS:0;20680646cf8a:40845 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T06:48:55,220 DEBUG [RS:0;20680646cf8a:40845 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T06:48:55,220 DEBUG [RS:0;20680646cf8a:40845 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T06:48:55,220 DEBUG [RS:0;20680646cf8a:40845 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 20680646cf8a,40845,1731566934940 2024-11-14T06:48:55,220 DEBUG [RS:0;20680646cf8a:40845 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '20680646cf8a,40845,1731566934940' 2024-11-14T06:48:55,220 DEBUG [RS:0;20680646cf8a:40845 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T06:48:55,221 DEBUG 
[RS:0;20680646cf8a:40845 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T06:48:55,221 DEBUG [RS:0;20680646cf8a:40845 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T06:48:55,221 INFO [RS:0;20680646cf8a:40845 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T06:48:55,221 INFO [RS:0;20680646cf8a:40845 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T06:48:55,277 WARN [20680646cf8a:39543 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-14T06:48:55,323 INFO [RS:0;20680646cf8a:40845 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=20680646cf8a%2C40845%2C1731566934940, suffix=, logDir=hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/WALs/20680646cf8a,40845,1731566934940, archiveDir=hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/oldWALs, maxLogs=32 2024-11-14T06:48:55,323 INFO [RS:0;20680646cf8a:40845 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C40845%2C1731566934940.1731566935323 2024-11-14T06:48:55,328 INFO [RS:0;20680646cf8a:40845 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/WALs/20680646cf8a,40845,1731566934940/20680646cf8a%2C40845%2C1731566934940.1731566935323 2024-11-14T06:48:55,329 DEBUG [RS:0;20680646cf8a:40845 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35777:35777),(127.0.0.1/127.0.0.1:33025:33025)] 2024-11-14T06:48:55,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,37019,1731566754231/20680646cf8a%2C37019%2C1731566754231.1731566754449 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T06:48:55,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33995/user/jenkins/test-data/854bbe2a-e693-e789-410a-ebb478bb9cc9/WALs/20680646cf8a,39105,1731566753272/20680646cf8a%2C39105%2C1731566753272.meta.1731566754094.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T06:48:55,527 DEBUG [20680646cf8a:39543 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T06:48:55,527 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=20680646cf8a,40845,1731566934940 2024-11-14T06:48:55,528 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 20680646cf8a,40845,1731566934940, state=OPENING 2024-11-14T06:48:55,529 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T06:48:55,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:55,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40845-0x1003cfdf7ce0001, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:55,531 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:48:55,531 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:48:55,531 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T06:48:55,531 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=20680646cf8a,40845,1731566934940}] 2024-11-14T06:48:55,683 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T06:48:55,685 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47945, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T06:48:55,689 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T06:48:55,690 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T06:48:55,692 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=20680646cf8a%2C40845%2C1731566934940.meta, suffix=.meta, logDir=hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/WALs/20680646cf8a,40845,1731566934940, archiveDir=hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/oldWALs, maxLogs=32 2024-11-14T06:48:55,692 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 20680646cf8a%2C40845%2C1731566934940.meta.1731566935692.meta 2024-11-14T06:48:55,698 INFO 
[RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/WALs/20680646cf8a,40845,1731566934940/20680646cf8a%2C40845%2C1731566934940.meta.1731566935692.meta 2024-11-14T06:48:55,703 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35777:35777),(127.0.0.1/127.0.0.1:33025:33025)] 2024-11-14T06:48:55,707 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T06:48:55,707 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T06:48:55,707 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T06:48:55,707 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-14T06:48:55,708 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T06:48:55,708 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T06:48:55,708 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T06:48:55,708 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T06:48:55,709 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T06:48:55,710 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T06:48:55,710 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:55,710 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:48:55,710 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T06:48:55,711 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T06:48:55,711 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:55,711 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:48:55,711 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T06:48:55,712 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T06:48:55,712 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:55,712 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:48:55,713 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T06:48:55,713 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T06:48:55,713 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T06:48:55,713 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T06:48:55,714 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T06:48:55,714 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/data/hbase/meta/1588230740 2024-11-14T06:48:55,715 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/data/hbase/meta/1588230740 2024-11-14T06:48:55,716 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T06:48:55,716 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T06:48:55,716 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
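
The FlushLargeStoresPolicy message just above notes that hbase:meta has no hbase.hregion.percolumnfamilyflush.size.lower.bound in its table descriptor, so the lower bound falls back to the region memstore flush heap size divided by the number of families (16.0 M here). As a hedged illustration only — the table name and column family below are hypothetical and not part of this run — that property can be set per table through TableDescriptorBuilder:

```java
// Sketch: setting the per-column-family flush lower bound in a table descriptor,
// so FlushLargeStoresPolicy does not fall back to flushHeapSize / #families.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushLowerBoundExample {
  public static TableDescriptor build() {
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_table"))           // hypothetical table
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))  // hypothetical family
        // Same property the log reports as unset for hbase:meta; value is in bytes.
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(16L * 1024 * 1024))
        .build();
  }
}
```
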
2024-11-14T06:48:55,717 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T06:48:55,718 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=737734, jitterRate=-0.06192335486412048}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T06:48:55,718 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T06:48:55,718 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731566935708Writing region info on filesystem at 1731566935708Initializing all the Stores at 1731566935709 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566935709Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566935709Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731566935709Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731566935709Cleaning up temporary data from old regions at 1731566935716 (+7 ms)Running coprocessor post-open hooks at 1731566935718 (+2 ms)Region opened successfully at 1731566935718 2024-11-14T06:48:55,719 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731566935683 2024-11-14T06:48:55,721 DEBUG [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T06:48:55,721 INFO [RS_OPEN_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T06:48:55,722 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=20680646cf8a,40845,1731566934940 2024-11-14T06:48:55,722 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 20680646cf8a,40845,1731566934940, state=OPEN 2024-11-14T06:48:55,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T06:48:55,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40845-0x1003cfdf7ce0001, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T06:48:55,725 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=20680646cf8a,40845,1731566934940 2024-11-14T06:48:55,725 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:48:55,725 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T06:48:55,727 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T06:48:55,727 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=20680646cf8a,40845,1731566934940 in 194 msec 2024-11-14T06:48:55,728 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T06:48:55,728 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 603 msec 2024-11-14T06:48:55,729 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T06:48:55,729 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T06:48:55,730 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T06:48:55,730 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=20680646cf8a,40845,1731566934940, seqNum=-1] 2024-11-14T06:48:55,731 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T06:48:55,732 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35251, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T06:48:55,736 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 653 msec 2024-11-14T06:48:55,736 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731566935736, completionTime=-1 2024-11-14T06:48:55,737 INFO 
[master/20680646cf8a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T06:48:55,737 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-14T06:48:55,739 INFO [master/20680646cf8a:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-14T06:48:55,739 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731566995739 2024-11-14T06:48:55,739 INFO [master/20680646cf8a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731567055739 2024-11-14T06:48:55,739 INFO [master/20680646cf8a:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-14T06:48:55,739 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,39543,1731566934892-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:55,739 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,39543,1731566934892-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:55,739 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,39543,1731566934892-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:55,739 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-20680646cf8a:39543, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:55,739 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:55,740 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:55,741 DEBUG [master/20680646cf8a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T06:48:55,743 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.768sec 2024-11-14T06:48:55,743 INFO [master/20680646cf8a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T06:48:55,743 INFO [master/20680646cf8a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T06:48:55,743 INFO [master/20680646cf8a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T06:48:55,743 INFO [master/20680646cf8a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
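
The master start-up above enables a series of ScheduledChore instances (ClusterStatusChore, BalancerChore, CatalogJanitor, HbckChore, and so on) on a ChoreService with fixed periods. A minimal sketch of that pattern follows; the chore name, period, and stopper are illustrative assumptions rather than values taken from this run:

```java
// Sketch: scheduling periodic work the way the master schedules its chores.
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreExample {
  public static void main(String[] args) {
    // Minimal stopper; real servers pass themselves (they implement Stoppable).
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("example");            // hypothetical thread-name prefix
    ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 60_000) {
      @Override protected void chore() {
        // periodic work goes here, analogous to BalancerChore / CatalogJanitor above
      }
    };
    service.scheduleChore(chore);   // logs "Chore ScheduledChore name=... is enabled."
    // ... later, on shutdown: service.shutdown();
  }
}
```
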
2024-11-14T06:48:55,744 INFO [master/20680646cf8a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T06:48:55,744 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,39543,1731566934892-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T06:48:55,744 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,39543,1731566934892-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T06:48:55,746 DEBUG [master/20680646cf8a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T06:48:55,746 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T06:48:55,746 INFO [master/20680646cf8a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=20680646cf8a,39543,1731566934892-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T06:48:55,763 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59a6b271, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T06:48:55,763 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 20680646cf8a,39543,-1 for getting cluster id 2024-11-14T06:48:55,763 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T06:48:55,764 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6c700ee6-7ae5-49fd-aebe-746d8b108a9a' 2024-11-14T06:48:55,764 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T06:48:55,764 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6c700ee6-7ae5-49fd-aebe-746d8b108a9a" 2024-11-14T06:48:55,765 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@770fcd5f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T06:48:55,765 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [20680646cf8a,39543,-1] 2024-11-14T06:48:55,765 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T06:48:55,765 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:48:55,766 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40086, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T06:48:55,767 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3396cecd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T06:48:55,767 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T06:48:55,768 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=20680646cf8a,40845,1731566934940, seqNum=-1] 2024-11-14T06:48:55,768 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T06:48:55,769 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44276, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T06:48:55,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=20680646cf8a,39543,1731566934892 2024-11-14T06:48:55,771 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T06:48:55,773 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T06:48:55,773 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T06:48:55,775 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/WALs/test.com,8080,1, archiveDir=hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/oldWALs, maxLogs=32 2024-11-14T06:48:55,775 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731566935775 2024-11-14T06:48:55,780 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/WALs/test.com,8080,1/test.com%2C8080%2C1.1731566935775 2024-11-14T06:48:55,780 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33025:33025),(127.0.0.1/127.0.0.1:35777:35777)] 2024-11-14T06:48:55,781 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731566935781 2024-11-14T06:48:55,785 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:55,785 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:55,785 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:55,785 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:55,785 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:55,786 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/WALs/test.com,8080,1/test.com%2C8080%2C1.1731566935775 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/WALs/test.com,8080,1/test.com%2C8080%2C1.1731566935781 2024-11-14T06:48:55,786 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35777:35777),(127.0.0.1/127.0.0.1:33025:33025)] 2024-11-14T06:48:55,786 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/WALs/test.com,8080,1/test.com%2C8080%2C1.1731566935775 is not closed yet, will try archiving it next time 2024-11-14T06:48:55,787 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:55,787 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:55,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_1073741835_1011 (size=93) 2024-11-14T06:48:55,787 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:55,787 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:55,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741835_1011 (size=93) 2024-11-14T06:48:55,788 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:55,788 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/WALs/test.com,8080,1/test.com%2C8080%2C1.1731566935775 to hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/oldWALs/test.com%2C8080%2C1.1731566935775 2024-11-14T06:48:55,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741836_1012 (size=93) 2024-11-14T06:48:55,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_1073741836_1012 (size=93) 2024-11-14T06:48:55,791 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/oldWALs 2024-11-14T06:48:55,792 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731566935781) 2024-11-14T06:48:55,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T06:48:55,792 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
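
The sequence just above is the test exercising WAL rolling directly: WALFactory instantiates an FSHLogProvider, a new WAL file test.com%2C8080%2C1.1731566935775 is created, a roll produces the .1731566935781 file, and the previous file is archived to oldWALs. A rough sketch of that flow is shown below; the provider key, factory id, and table name are assumptions, and the exact WALFactory/getWAL signatures differ between HBase versions:

```java
// Sketch (assumed signatures): create an FSHLog-backed WAL, roll it, then close the factory.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;

public class WalRollSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "filesystem");              // FSHLogProvider, as in the log
    WALFactory wals = new WALFactory(conf, "test.com,8080,1"); // factory id mirrors the log prefix
    RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf("example")).build();
    WAL wal = wals.getWAL(region);
    wal.rollWriter();   // opens a new writer; the old file becomes eligible for archiving to oldWALs
    wals.close();
  }
}
```
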
2024-11-14T06:48:55,792 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T06:48:55,792 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:48:55,792 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:48:55,792 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
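
The call stack above originates in AbstractTestLogRolling.tearDown invoking HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection and then tears the cluster down. A minimal sketch of that test scaffolding, with a hypothetical test class name, looks like this:

```java
// Sketch: JUnit 4 lifecycle around the HBase minicluster, matching the tearDown stack above.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class ExampleMiniClusterTest {
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    testUtil.startMiniCluster();      // one master + one region server by default
  }

  @After
  public void tearDown() throws Exception {
    testUtil.shutdownMiniCluster();   // produces the connection-close and shutdown sequence logged here
  }
}
```
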
2024-11-14T06:48:55,792 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T06:48:55,792 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1218461600, stopped=false 2024-11-14T06:48:55,792 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=20680646cf8a,39543,1731566934892 2024-11-14T06:48:55,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T06:48:55,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40845-0x1003cfdf7ce0001, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T06:48:55,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:55,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40845-0x1003cfdf7ce0001, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:55,793 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T06:48:55,794 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T06:48:55,794 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T06:48:55,794 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:48:55,794 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40845-0x1003cfdf7ce0001, quorum=127.0.0.1:50296, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:48:55,794 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '20680646cf8a,40845,1731566934940' ***** 2024-11-14T06:48:55,794 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T06:48:55,794 INFO [RS:0;20680646cf8a:40845 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T06:48:55,794 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T06:48:55,794 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T06:48:55,794 INFO [RS:0;20680646cf8a:40845 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T06:48:55,795 INFO [RS:0;20680646cf8a:40845 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T06:48:55,795 INFO [RS:0;20680646cf8a:40845 {}] regionserver.HRegionServer(959): stopping server 20680646cf8a,40845,1731566934940 2024-11-14T06:48:55,795 INFO [RS:0;20680646cf8a:40845 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T06:48:55,795 INFO [RS:0;20680646cf8a:40845 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;20680646cf8a:40845. 
2024-11-14T06:48:55,795 DEBUG [RS:0;20680646cf8a:40845 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T06:48:55,795 DEBUG [RS:0;20680646cf8a:40845 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:48:55,795 INFO [RS:0;20680646cf8a:40845 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T06:48:55,795 INFO [RS:0;20680646cf8a:40845 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T06:48:55,795 INFO [RS:0;20680646cf8a:40845 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-14T06:48:55,795 INFO [RS:0;20680646cf8a:40845 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T06:48:55,795 INFO [RS:0;20680646cf8a:40845 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-14T06:48:55,795 DEBUG [RS:0;20680646cf8a:40845 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-14T06:48:55,795 DEBUG [RS:0;20680646cf8a:40845 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-14T06:48:55,795 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T06:48:55,795 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T06:48:55,795 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T06:48:55,795 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T06:48:55,795 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T06:48:55,796 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-14T06:48:55,810 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/data/hbase/meta/1588230740/.tmp/ns/adca301e838d42d3a19be451edd9db6e is 43, key is default/ns:d/1731566935732/Put/seqid=0 2024-11-14T06:48:55,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_1073741837_1013 (size=5153) 2024-11-14T06:48:55,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741837_1013 (size=5153) 2024-11-14T06:48:55,815 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/data/hbase/meta/1588230740/.tmp/ns/adca301e838d42d3a19be451edd9db6e 2024-11-14T06:48:55,820 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/data/hbase/meta/1588230740/.tmp/ns/adca301e838d42d3a19be451edd9db6e as hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/data/hbase/meta/1588230740/ns/adca301e838d42d3a19be451edd9db6e 2024-11-14T06:48:55,824 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/data/hbase/meta/1588230740/ns/adca301e838d42d3a19be451edd9db6e, entries=2, sequenceid=6, filesize=5.0 K 2024-11-14T06:48:55,825 INFO 
[RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false 2024-11-14T06:48:55,826 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T06:48:55,831 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-14T06:48:55,831 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T06:48:55,831 INFO [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T06:48:55,831 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731566935795Running coprocessor pre-close hooks at 1731566935795Disabling compacts and flushes for region at 1731566935795Disabling writes for close at 1731566935795Obtaining lock to block concurrent updates at 1731566935796 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731566935796Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731566935796Flushing stores of hbase:meta,,1.1588230740 at 1731566935796Flushing 1588230740/ns: creating writer at 1731566935796Flushing 1588230740/ns: appending metadata at 1731566935810 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731566935810Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@213bf926: reopening flushed file at 1731566935819 (+9 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false at 1731566935825 (+6 ms)Writing region close event to WAL at 1731566935827 (+2 ms)Running coprocessor post-close hooks at 1731566935831 (+4 ms)Closed at 1731566935831 2024-11-14T06:48:55,832 DEBUG [RS_CLOSE_META-regionserver/20680646cf8a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T06:48:55,996 INFO [RS:0;20680646cf8a:40845 {}] regionserver.HRegionServer(976): stopping server 20680646cf8a,40845,1731566934940; all regions closed. 
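
Before the region closes, its memstore is flushed: the 74 bytes in the ns family are written to a temporary HFile and committed under data/hbase/meta/1588230740/ns/. The same flush can also be requested explicitly from a client; a hedged sketch (hypothetical table name, default configuration assumed) is:

```java
// Sketch: client-triggered flush; the close path above performs the equivalent work implicitly.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Flush memstores of a table to HFiles on demand.
      admin.flush(TableName.valueOf("example_table"));   // hypothetical table
    }
  }
}
```
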
2024-11-14T06:48:55,996 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:55,996 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:55,997 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:55,997 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:55,997 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:55,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741834_1010 (size=1152) 2024-11-14T06:48:55,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_1073741834_1010 (size=1152) 2024-11-14T06:48:56,002 DEBUG [RS:0;20680646cf8a:40845 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/oldWALs 2024-11-14T06:48:56,002 INFO [RS:0;20680646cf8a:40845 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 20680646cf8a%2C40845%2C1731566934940.meta:.meta(num 1731566935692) 2024-11-14T06:48:56,003 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:56,003 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:56,003 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:56,003 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:56,003 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:56,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741833_1009 (size=93) 2024-11-14T06:48:56,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_1073741833_1009 (size=93) 2024-11-14T06:48:56,009 DEBUG [RS:0;20680646cf8a:40845 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/oldWALs 2024-11-14T06:48:56,009 INFO [RS:0;20680646cf8a:40845 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 20680646cf8a%2C40845%2C1731566934940:(num 1731566935323) 2024-11-14T06:48:56,009 DEBUG [RS:0;20680646cf8a:40845 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T06:48:56,009 INFO [RS:0;20680646cf8a:40845 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T06:48:56,009 INFO [RS:0;20680646cf8a:40845 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T06:48:56,009 INFO [RS:0;20680646cf8a:40845 {}] hbase.ChoreService(370): Chore service for: regionserver/20680646cf8a:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-14T06:48:56,009 INFO [RS:0;20680646cf8a:40845 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T06:48:56,010 INFO [regionserver/20680646cf8a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-14T06:48:56,010 INFO [RS:0;20680646cf8a:40845 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40845 2024-11-14T06:48:56,011 INFO [RS:0;20680646cf8a:40845 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T06:48:56,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T06:48:56,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40845-0x1003cfdf7ce0001, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/20680646cf8a,40845,1731566934940 2024-11-14T06:48:56,012 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [20680646cf8a,40845,1731566934940] 2024-11-14T06:48:56,013 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/20680646cf8a,40845,1731566934940 already deleted, retry=false 2024-11-14T06:48:56,013 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 20680646cf8a,40845,1731566934940 expired; onlineServers=0 2024-11-14T06:48:56,013 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '20680646cf8a,39543,1731566934892' ***** 2024-11-14T06:48:56,013 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T06:48:56,013 INFO [M:0;20680646cf8a:39543 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T06:48:56,013 INFO [M:0;20680646cf8a:39543 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T06:48:56,013 DEBUG [M:0;20680646cf8a:39543 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T06:48:56,013 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-14T06:48:56,013 DEBUG [M:0;20680646cf8a:39543 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T06:48:56,013 DEBUG [master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.small.0-1731566935100 {}] cleaner.HFileCleaner(306): Exit Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.small.0-1731566935100,5,FailOnTimeoutGroup] 2024-11-14T06:48:56,013 DEBUG [master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.large.0-1731566935100 {}] cleaner.HFileCleaner(306): Exit Thread[master/20680646cf8a:0:becomeActiveMaster-HFileCleaner.large.0-1731566935100,5,FailOnTimeoutGroup] 2024-11-14T06:48:56,013 INFO [M:0;20680646cf8a:39543 {}] hbase.ChoreService(370): Chore service for: master/20680646cf8a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T06:48:56,013 INFO [M:0;20680646cf8a:39543 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T06:48:56,013 DEBUG [M:0;20680646cf8a:39543 {}] master.HMaster(1795): Stopping service threads 2024-11-14T06:48:56,014 INFO [M:0;20680646cf8a:39543 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T06:48:56,014 INFO [M:0;20680646cf8a:39543 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T06:48:56,014 INFO [M:0;20680646cf8a:39543 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T06:48:56,014 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T06:48:56,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T06:48:56,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T06:48:56,014 DEBUG [M:0;20680646cf8a:39543 {}] zookeeper.ZKUtil(347): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T06:48:56,015 WARN [M:0;20680646cf8a:39543 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T06:48:56,015 INFO [M:0;20680646cf8a:39543 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/.lastflushedseqids 2024-11-14T06:48:56,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741838_1014 (size=99) 2024-11-14T06:48:56,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_1073741838_1014 (size=99) 2024-11-14T06:48:56,021 INFO [M:0;20680646cf8a:39543 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T06:48:56,021 INFO [M:0;20680646cf8a:39543 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T06:48:56,021 DEBUG [M:0;20680646cf8a:39543 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T06:48:56,021 INFO [M:0;20680646cf8a:39543 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:48:56,021 DEBUG [M:0;20680646cf8a:39543 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:48:56,021 DEBUG [M:0;20680646cf8a:39543 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T06:48:56,021 DEBUG [M:0;20680646cf8a:39543 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:48:56,021 INFO [M:0;20680646cf8a:39543 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-14T06:48:56,034 DEBUG [M:0;20680646cf8a:39543 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8dbd1455adb749348cf60da5db14ded7 is 82, key is hbase:meta,,1/info:regioninfo/1731566935722/Put/seqid=0 2024-11-14T06:48:56,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_1073741839_1015 (size=5672) 2024-11-14T06:48:56,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741839_1015 (size=5672) 2024-11-14T06:48:56,038 INFO [M:0;20680646cf8a:39543 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8dbd1455adb749348cf60da5db14ded7 2024-11-14T06:48:56,054 DEBUG [M:0;20680646cf8a:39543 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c6a5cc44cb89406a99fc6c74f8362bd2 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731566935736/Put/seqid=0 2024-11-14T06:48:56,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741840_1016 (size=5275) 2024-11-14T06:48:56,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_1073741840_1016 (size=5275) 2024-11-14T06:48:56,059 INFO [M:0;20680646cf8a:39543 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c6a5cc44cb89406a99fc6c74f8362bd2 2024-11-14T06:48:56,077 DEBUG [M:0;20680646cf8a:39543 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9ef402bbd16048c990a6d5cfa8861842 is 69, key is 20680646cf8a,40845,1731566934940/rs:state/1731566935181/Put/seqid=0 2024-11-14T06:48:56,081 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741841_1017 (size=5156) 2024-11-14T06:48:56,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_1073741841_1017 (size=5156) 2024-11-14T06:48:56,082 INFO [M:0;20680646cf8a:39543 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9ef402bbd16048c990a6d5cfa8861842 2024-11-14T06:48:56,105 DEBUG [M:0;20680646cf8a:39543 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b759b76b2a174b44a79c0cde8c417d21 is 52, key is load_balancer_on/state:d/1731566935772/Put/seqid=0 2024-11-14T06:48:56,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_1073741842_1018 (size=5056) 2024-11-14T06:48:56,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741842_1018 (size=5056) 2024-11-14T06:48:56,110 INFO [M:0;20680646cf8a:39543 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b759b76b2a174b44a79c0cde8c417d21 2024-11-14T06:48:56,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40845-0x1003cfdf7ce0001, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:48:56,112 INFO [RS:0;20680646cf8a:40845 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T06:48:56,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40845-0x1003cfdf7ce0001, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:48:56,112 INFO [RS:0;20680646cf8a:40845 {}] regionserver.HRegionServer(1031): Exiting; stopping=20680646cf8a,40845,1731566934940; zookeeper connection closed. 
2024-11-14T06:48:56,112 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@35661e11 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@35661e11 2024-11-14T06:48:56,112 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T06:48:56,114 DEBUG [M:0;20680646cf8a:39543 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8dbd1455adb749348cf60da5db14ded7 as hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8dbd1455adb749348cf60da5db14ded7 2024-11-14T06:48:56,118 INFO [M:0;20680646cf8a:39543 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8dbd1455adb749348cf60da5db14ded7, entries=8, sequenceid=29, filesize=5.5 K 2024-11-14T06:48:56,118 DEBUG [M:0;20680646cf8a:39543 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c6a5cc44cb89406a99fc6c74f8362bd2 as hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c6a5cc44cb89406a99fc6c74f8362bd2 2024-11-14T06:48:56,122 INFO [M:0;20680646cf8a:39543 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c6a5cc44cb89406a99fc6c74f8362bd2, entries=3, sequenceid=29, filesize=5.2 K 2024-11-14T06:48:56,123 DEBUG [M:0;20680646cf8a:39543 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9ef402bbd16048c990a6d5cfa8861842 as hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9ef402bbd16048c990a6d5cfa8861842 2024-11-14T06:48:56,126 INFO [M:0;20680646cf8a:39543 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9ef402bbd16048c990a6d5cfa8861842, entries=1, sequenceid=29, filesize=5.0 K 2024-11-14T06:48:56,127 DEBUG [M:0;20680646cf8a:39543 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b759b76b2a174b44a79c0cde8c417d21 as hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b759b76b2a174b44a79c0cde8c417d21 2024-11-14T06:48:56,130 INFO [M:0;20680646cf8a:39543 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35559/user/jenkins/test-data/59daccda-636e-dfea-da57-398e20502e23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b759b76b2a174b44a79c0cde8c417d21, entries=1, sequenceid=29, filesize=4.9 K 2024-11-14T06:48:56,131 INFO [M:0;20680646cf8a:39543 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 110ms, sequenceid=29, compaction requested=false 2024-11-14T06:48:56,133 INFO [M:0;20680646cf8a:39543 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T06:48:56,133 DEBUG [M:0;20680646cf8a:39543 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731566936021Disabling compacts and flushes for region at 1731566936021Disabling writes for close at 1731566936021Obtaining lock to block concurrent updates at 1731566936021Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731566936021Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731566936022 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731566936022Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731566936022Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731566936034 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731566936034Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731566936042 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731566936054 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731566936054Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731566936063 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731566936076 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731566936076Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731566936086 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731566936104 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731566936104Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@29528e1b: reopening flushed file at 1731566936114 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7f16165d: reopening flushed file at 1731566936118 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@49725cfb: reopening flushed file at 1731566936122 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7ba578aa: reopening flushed file at 1731566936126 (+4 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 110ms, sequenceid=29, compaction requested=false at 1731566936131 (+5 ms)Writing region close event to WAL at 1731566936133 (+2 ms)Closed at 1731566936133 2024-11-14T06:48:56,133 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:56,133 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:56,133 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:56,133 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:56,133 
INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T06:48:56,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_1073741830_1006 (size=10311) 2024-11-14T06:48:56,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37127 is added to blk_1073741830_1006 (size=10311) 2024-11-14T06:48:56,135 INFO [M:0;20680646cf8a:39543 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-14T06:48:56,135 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T06:48:56,135 INFO [M:0;20680646cf8a:39543 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39543 2024-11-14T06:48:56,136 INFO [M:0;20680646cf8a:39543 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T06:48:56,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:48:56,237 INFO [M:0;20680646cf8a:39543 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T06:48:56,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39543-0x1003cfdf7ce0000, quorum=127.0.0.1:50296, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T06:48:56,244 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@45235d1a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:48:56,245 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2527c84a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:48:56,245 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:48:56,245 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e818af2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:48:56,246 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@9268180{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/hadoop.log.dir/,STOPPED} 2024-11-14T06:48:56,247 WARN [BP-2076026094-172.17.0.2-1731566934330 heartbeating to localhost/127.0.0.1:35559 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T06:48:56,247 WARN [BP-2076026094-172.17.0.2-1731566934330 heartbeating to localhost/127.0.0.1:35559 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2076026094-172.17.0.2-1731566934330 (Datanode Uuid 493d5b00-28b0-4b14-912a-10a6ab2cd91d) service to localhost/127.0.0.1:35559 2024-11-14T06:48:56,247 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
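The "Committing .tmp/... as .../info|proc|rs|state/..." lines in the flush sequence above follow the usual write-then-rename pattern: the flusher writes the HFile under the store's .tmp directory and the file is then moved into the column family directory before it becomes visible. The sketch below shows that pattern with Hadoop's public FileSystem API under hypothetical paths and a hypothetical helper name; HBase's own HRegionFileSystem adds validation and bookkeeping that is omitted here.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Move a flushed temp file into its family directory via an HDFS rename. */
public class TmpCommitSketch {
  public static Path commit(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
    Path dst = new Path(familyDir, tmpFile.getName());
    // On HDFS a plain file rename is a single atomic namespace operation.
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " to " + dst);
    }
    return dst;
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // fs.defaultFS would point at the test cluster, e.g. hdfs://localhost:<port>.
    FileSystem fs = FileSystem.get(conf);
    Path tmp = new Path("/user/jenkins/store/.tmp/9ef402bbd16048c990a6d5cfa8861842");
    Path familyDir = new Path("/user/jenkins/store/rs");
    System.out.println("Committed to " + commit(fs, tmp, familyDir));
  }
}
```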
2024-11-14T06:48:56,247 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T06:48:56,247 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/cluster_9dc0f410-2e85-675d-e08d-7b710d1cab13/data/data3/current/BP-2076026094-172.17.0.2-1731566934330 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:48:56,248 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/cluster_9dc0f410-2e85-675d-e08d-7b710d1cab13/data/data4/current/BP-2076026094-172.17.0.2-1731566934330 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:48:56,248 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T06:48:56,249 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@265f6a26{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T06:48:56,250 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7d6e4d22{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:48:56,250 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:48:56,250 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20042785{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:48:56,250 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b01e479{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/hadoop.log.dir/,STOPPED} 2024-11-14T06:48:56,251 WARN [BP-2076026094-172.17.0.2-1731566934330 heartbeating to localhost/127.0.0.1:35559 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T06:48:56,251 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
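The "Thread Interrupted waiting to refresh disk information: sleep interrupted" warnings above come from background refresh threads being interrupted during datanode shutdown. The sketch below shows the generic interruptible-refresh pattern those warnings correspond to; it is an illustration only, not Hadoop's CachingGetSpaceUsed implementation, and the class and interval are made up.

```java
import java.util.concurrent.atomic.AtomicLong;

/** Periodically refreshes a cached value and exits cleanly when interrupted. */
public class RefreshThreadSketch implements Runnable {
  private final AtomicLong cachedValue = new AtomicLong();
  private final long intervalMs;

  public RefreshThreadSketch(long intervalMs) {
    this.intervalMs = intervalMs;
  }

  @Override
  public void run() {
    while (!Thread.currentThread().isInterrupted()) {
      cachedValue.set(refresh());
      try {
        Thread.sleep(intervalMs);
      } catch (InterruptedException e) {
        // Shutdown path: restore the interrupt flag and stop refreshing.
        Thread.currentThread().interrupt();
      }
    }
  }

  private long refresh() {
    // Placeholder for the expensive measurement (e.g. scanning disk usage).
    return System.nanoTime();
  }

  public static void main(String[] args) throws InterruptedException {
    Thread t = new Thread(new RefreshThreadSketch(1_000), "refreshUsed-sketch");
    t.start();
    Thread.sleep(2_500);
    t.interrupt(); // triggers the same "sleep interrupted" shutdown behaviour
    t.join();
  }
}
```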
2024-11-14T06:48:56,251 WARN [BP-2076026094-172.17.0.2-1731566934330 heartbeating to localhost/127.0.0.1:35559 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2076026094-172.17.0.2-1731566934330 (Datanode Uuid 7bfc013a-00aa-4e99-a5e9-67b880910def) service to localhost/127.0.0.1:35559 2024-11-14T06:48:56,251 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T06:48:56,252 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/cluster_9dc0f410-2e85-675d-e08d-7b710d1cab13/data/data1/current/BP-2076026094-172.17.0.2-1731566934330 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:48:56,252 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/cluster_9dc0f410-2e85-675d-e08d-7b710d1cab13/data/data2/current/BP-2076026094-172.17.0.2-1731566934330 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T06:48:56,252 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T06:48:56,257 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5be97557{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T06:48:56,257 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6ce66601{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T06:48:56,257 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T06:48:56,257 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a024a47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T06:48:56,257 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1e07b1d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/693bae84-f601-168e-b4ff-c662493b7d49/hadoop.log.dir/,STOPPED} 2024-11-14T06:48:56,263 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T06:48:56,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T06:48:56,290 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=269 (was 229) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35559 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC 
Client (75657370) connection to localhost/127.0.0.1:35559 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35559 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35559 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35559 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35559 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: globalEventExecutor-1-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:35559 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35559 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=538 (was 515) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=179 (was 169) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=693 (was 719)
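The closing ResourceChecker report above compares before/after counts (Thread=269 was 229, OpenFileDescriptor=538 was 515, and so on) and dumps stacks for threads that appeared during the test. The sketch below shows one way to produce that kind of before/after thread accounting with standard JDK APIs; it is an illustration under assumed names, not HBase's ResourceChecker, which also tracks file descriptors, process count, and system load.

```java
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

/** Snapshot live threads before and after a test and report the new ones. */
public class ThreadLeakSketch {
  private final ThreadMXBean threads = ManagementFactory.getThreadMXBean();
  private Set<String> before;

  public void beforeTest() {
    before = liveThreadNames();
    System.out.println("before: Thread=" + threads.getThreadCount());
  }

  public void afterTest() {
    Set<String> leaked = liveThreadNames();
    System.out.println("after: Thread=" + threads.getThreadCount()
        + " (was " + before.size() + ")");
    leaked.removeAll(before);
    for (Map.Entry<Thread, StackTraceElement[]> e : Thread.getAllStackTraces().entrySet()) {
      if (leaked.contains(e.getKey().getName())) {
        System.out.println("Potentially hanging thread: " + e.getKey().getName());
        for (StackTraceElement frame : e.getValue()) {
          System.out.println("    " + frame);
        }
      }
    }
  }

  private static Set<String> liveThreadNames() {
    Set<String> names = new HashSet<>();
    for (Thread t : Thread.getAllStackTraces().keySet()) {
      names.add(t.getName());
    }
    return names;
  }

  public static void main(String[] args) {
    ThreadLeakSketch checker = new ThreadLeakSketch();
    checker.beforeTest();
    // Deliberately leak a thread so the report has something to show.
    Thread leaked = new Thread(() -> {
      try { Thread.sleep(60_000); } catch (InterruptedException ignored) { }
    }, "leaked-sketch-thread");
    leaked.setDaemon(true);
    leaked.start();
    checker.afterTest();
  }
}
```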